| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
""" Tokenization classes for PhoBERT """
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
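
# Editor's note -- a minimal illustration of `get_pairs` (not part of the
# original module): for the symbol tuple of "low" it returns the two adjacent
# bigrams that the BPE loop below ranks and merges.
#
#     >>> get_pairs(("l", "o", "w"))
#     {('l', 'o'), ('o', 'w')}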
class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer, based on Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # The four special tokens occupy the first four vocabulary ids.
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs by adding special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """PhoBERT does not use token type ids, so the returned list is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply Byte-Pair Encoding merges to a single token, caching the result."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string, removing BPE markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
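
# Editor's sketch (not part of the original file): typical usage, assuming
# local `vocab.txt` / `bpe.codes` files exist. PhoBERT expects word-segmented
# Vietnamese input, and sub-word continuations are marked with "@@".
def _demo_phobert_tokenizer():  # hypothetical helper, illustrative only
    tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
    tokens = tokenizer.tokenize("Tôi là sinh_viên")
    print(tokens)  # e.g. ['Tôi', 'là', 'sinh_viên'] for in-vocabulary words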
| 229 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
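
# Editor's sketch (not in the original script): with the defaults above, the
# generator yields a flat 0/1 tensor with one entry per model patch.
def _demo_mask_generator():  # illustrative only
    generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = generator()
    # 192/32 = 6 mask patches per side, upsampled by 32/4 = 8 -> 48x48 = 2304 entries.
    assert mask.shape == (2304,)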
def collate_fn(examples):
    """Stack the pixel values and masks of a list of examples into a single batch dict."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
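
# Editor's note -- an illustrative invocation (flags abridged; paths are
# hypothetical, not prescribed by the script):
#   python run_mim.py --model_type vit --dataset_name cifar10 \
#       --output_dir ./simmim-out --do_train --do_eval --overwrite_output_dir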
| 74 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config_from_model = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config_from_model, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 572 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Data2VecVision model."""

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
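
# Editor's sketch (not part of the original file): instantiating the config
# with no arguments reproduces the base-model defaults listed above.
def _demo_config():  # illustrative only
    config = Data2VecVisionConfig()
    assert config.hidden_size == 768 and config.num_attention_heads == 12
    onnx_config = Data2VecVisionOnnxConfig(config)
    assert "pixel_values" in onnx_config.inputs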
| 74 | 0 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Return the number of times the beam hits the inside of the ellipse before exiting."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 146 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_distilbert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_distilbert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_distilbert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_distilbert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_distilbert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 74 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the indentation of the first non-empty line in `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 301 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Return the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    """Proxy returned by `acquire` so the lock can be used in a `with` statement."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent. Must set `self._lock_file_fd` when the lock is acquired."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent. Must release the lock and reset `self._lock_file_fd`."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the file lock, or fail with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
def UpperCAmelCase__ ( self : int , _A : List[str]=False ):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__SCREAMING_SNAKE_CASE : Optional[int] = id(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
__SCREAMING_SNAKE_CASE : int = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : int ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ):
"""simple docstring"""
self.release()
return None
def __del__( self : int ):
"""simple docstring"""
self.release(force=_A )
return None
def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = os.path.basename(_A )
if len(_A ) > max_length and max_length > 0:
__SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) )
__SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(_A , _A )
else:
return path
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(_A , timeout=_A , max_filename_length=_A )
__SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A )
except OSError:
pass
else:
try:
msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(_A )
else:
__SCREAMING_SNAKE_CASE : str = fd
return None
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self._lock_file_fd
__SCREAMING_SNAKE_CASE : int = None
msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 )
os.close(_A )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax
super().__init__(_A , timeout=_A , max_filename_length=_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A )
try:
fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_A )
else:
__SCREAMING_SNAKE_CASE : int = fd
return None
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd
__SCREAMING_SNAKE_CASE : Any = None
fcntl.flock(_A , fcntl.LOCK_UN )
os.close(_A )
return None
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A )
except OSError:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = fd
return None
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
os.close(self._lock_file_fd )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowercase_ = None
if msvcrt:
lowercase_ = WindowsFileLock
elif fcntl:
lowercase_ = UnixFileLock
else:
lowercase_ = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 74 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
_A : List[str] = '''data2vec-vision'''
def __init__( self : Optional[int] , _a : List[Any]=768 , _a : Any=12 , _a : str=12 , _a : Union[str, Any]=3_072 , _a : Union[str, Any]="gelu" , _a : List[Any]=0.0 , _a : Dict=0.0 , _a : Dict=0.02 , _a : Any=1E-12 , _a : Optional[Any]=224 , _a : Union[str, Any]=16 , _a : Tuple=3 , _a : List[Any]=False , _a : List[str]=False , _a : Dict=False , _a : Dict=False , _a : Any=0.1 , _a : List[str]=0.1 , _a : Dict=True , _a : Dict=[3, 5, 7, 11] , _a : Union[str, Any]=[1, 2, 3, 6] , _a : Optional[Any]=True , _a : Any=0.4 , _a : List[str]=256 , _a : Any=1 , _a : Any=False , _a : Union[str, Any]=255 , **_a : Tuple , ):
super().__init__(**_A )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = use_mask_token
UpperCamelCase__ = use_absolute_position_embeddings
UpperCamelCase__ = use_relative_position_bias
UpperCamelCase__ = use_shared_relative_position_bias
UpperCamelCase__ = layer_scale_init_value
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCamelCase__ = out_indices
UpperCamelCase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCamelCase__ = use_auxiliary_head
UpperCamelCase__ = auxiliary_loss_weight
UpperCamelCase__ = auxiliary_channels
UpperCamelCase__ = auxiliary_num_convs
UpperCamelCase__ = auxiliary_concat_input
UpperCamelCase__ = semantic_loss_ignore_index
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
_A : int = version.parse('''1.11''' )
@property
def A_ ( self : List[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A_ ( self : Optional[int] ):
return 1E-4
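The block above mirrors the Data2Vec vision configuration (and its ONNX export config) from `transformers`. A hedged construction sketch, with the class name taken from the upstream library rather than the obfuscated names here:

from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()  # defaults mirror the __init__ signature above
print(config.model_type)  # "data2vec-vision"
print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 224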
| 240 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , **_A : Dict ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**_A )
def UpperCAmelCase__ ( self : Optional[int] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) )
__SCREAMING_SNAKE_CASE : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' )
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = []
for element in html_code.descendants:
if type(_A ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A )
stringaxtag_seq.append(_A )
stringaxsubs_seq.append(_A )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for tagname, subs in zip(_A , _A ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self : Optional[int] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = False
# Check that strings has a valid type
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = True
elif isinstance(_A , (list, tuple) ):
if len(_A ) == 0 or isinstance(html_strings[0] , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
F'''but is of type {type(_A )}.''' )
__SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) )
if not is_batched:
__SCREAMING_SNAKE_CASE : Dict = [html_strings]
# Get nodes + xpaths
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Tuple = []
for html_string in html_strings:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A )
nodes.append(_A )
__SCREAMING_SNAKE_CASE : Dict = []
for node, tag_list, sub_list in zip(_A , _A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A )
xpath_strings.append(_A )
xpaths.append(_A )
# return as Dict
__SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths}
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs
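The extractor above mirrors `transformers`' `MarkupLMFeatureExtractor` (HTML-to-xpath preprocessing). A hedged sketch of the intended behavior, assuming that upstream class; the commented outputs are what the xpath construction above should yield for this input:

from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]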
| 74 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
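A quick look at the per-stage layout encoded by the defaults above, using the config class as restored:

config = GLPNConfig()
for i, (depth, width, sr) in enumerate(zip(config.depths, config.hidden_sizes, config.sr_ratios)):
    print(f"stage {i}: {depth} transformer blocks, hidden size {width}, attention sr ratio {sr}")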
| 195 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ):
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case )
else:
__SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case )
if hidden_sizes == 192:
__SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case )
if hidden_sizes == 256:
__SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case )
if hidden_sizes == 384:
__SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case )
from_model.eval()
__SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval()
__SCREAMING_SNAKE_CASE : int = OrderedDict()
__SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict()
__SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() )
__SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() )
print(len(snake_case ) , len(snake_case ) )
for i in range(len(snake_case ) ):
__SCREAMING_SNAKE_CASE : int = weights[og_keys[i]]
our_model.load_state_dict(snake_case )
__SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) )
__SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits
assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one."
__SCREAMING_SNAKE_CASE : Union[str, Any] = name
print(snake_case )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def a__ ( snake_case , snake_case = None , snake_case = True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json'''
__SCREAMING_SNAKE_CASE : int = 1_000
__SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels)
__SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
__SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
__SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : str = idalabel
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case )
return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
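Example invocation of the conversion script above; the script filename is hypothetical, and `--model_name` must be one of the keys of `names_to_hidden_sizes`:

#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
# Omitting --model_name converts and saves every supported LeViT variant in turn.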
| 74 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
lowerCAmelCase__ : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ : Union[str, Any] = np.zeros((n + 1,) )
lowerCAmelCase__ : str = ya
lowerCAmelCase__ : Union[str, Any] = xa
for k in range(lowercase__ ):
lowerCAmelCase__ : Optional[Any] = y[k] + step_size * ode_func(lowercase__ , y[k] )
lowerCAmelCase__ : Dict = y[k] + (
(step_size / 2) * (ode_func(lowercase__ , y[k] ) + ode_func(x + step_size , lowercase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
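A quick numerical sanity check for the integrator above: for dy/dx = y with y(0) = 1 the exact solution is e^x, so the second-order scheme should land close to e at x = 1 (values in comments are approximate):

import numpy as np

ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.1, x_end=1.0)
print(ys[-1])       # ~2.7141
print(np.exp(1.0))  # ~2.71828; the gap shrinks like O(step_size**2)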
| 453 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_falcon"] = [
    "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
    "FalconForCausalLM",
    "FalconModel",
    "FalconPreTrainedModel",
    "FalconForSequenceClassification",
    "FalconForTokenClassification",
    "FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
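A small sketch of what the lazy-module indirection above buys: nothing under `modeling_falcon` is imported until an attribute is first touched (assumes the packaged `transformers` layout, where this file is the falcon `__init__`):

import importlib

transformers = importlib.import_module("transformers")
# No torch-heavy falcon code has been imported yet; this first attribute access triggers it:
FalconConfig = transformers.FalconConfig
print(FalconConfig.model_type)  # "falcon"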
| 74 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( lowerCAmelCase__,unittest.TestCase ):
_a = DebertaVaTokenizer
_a = DebertaVaTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
_A : str = DebertaVaTokenizer(_A , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self , _a ) -> Tuple:
_A : str = '''this is a test'''
_A : Dict = '''this is a test'''
return input_text, output_text
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = '''<pad>'''
_A : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def a__ ( self ) -> int:
_A : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(_A ) , 3_0001 )
def a__ ( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def a__ ( self ) -> List[Any]:
_A : List[Any] = ''' \tHeLLo!how \n Are yoU? '''
_A : Union[str, Any] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_A : List[str] = DebertaVaTokenizer(_A , do_lower_case=_A )
_A : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : Tuple = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_A : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def a__ ( self ) -> Dict:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def a__ ( self ) -> List[str]:
pass
def a__ ( self ) -> Dict:
_A : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_A : Optional[int] = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_A : Any = DebertaVaTokenizer(_A , split_by_punct=_A )
_A : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : Tuple = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_A : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def a__ ( self ) -> Optional[Any]:
_A : str = '''I was born in 92000, and this is falsé.'''
_A : Optional[Any] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_A : List[str] = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_A : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : str = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_A : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def a__ ( self ) -> Any:
_A : Tuple = '''I was born in 92000, and this is falsé.'''
_A : List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_A : Union[str, Any] = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_A : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : Optional[int] = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_A : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def a__ ( self ) -> List[str]:
_A : Any = '''I was born in 92000, and this is falsé.'''
_A : Optional[int] = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_A : Optional[int] = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_A : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : Union[str, Any] = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_A : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = ''' \tHeLLo!how \n Are yoU? '''
_A : Union[str, Any] = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_A : Tuple = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_A : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : Dict = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_A : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = self.get_tokenizer()
_A : Any = self.get_rust_tokenizer()
_A : Dict = '''I was born in 92000, and this is falsé.'''
_A : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_A : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_A : str = tokenizer.encode(_A , add_special_tokens=_A )
_A : List[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_A : Optional[Any] = self.get_rust_tokenizer()
_A : Tuple = tokenizer.encode(_A )
_A : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def a__ ( self ) -> Tuple:
_A : Optional[int] = '''This is a test'''
_A : str = [13, 1, 4398, 25, 21, 1289]
_A : Any = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_A : Optional[Any] = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_A : List[str] = DebertaVaTokenizer(_A , keep_accents=_A )
_A : Dict = DebertaVaTokenizerFast(_A , keep_accents=_A )
_A : int = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_A : Tuple = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_A : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_A : Dict = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_A : Any = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_A : int = '''I was born in 92000, and this is falsé.'''
_A : Union[str, Any] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_A : str = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_A : Tuple = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_A : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_A : List[str] = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_A : str = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_A : Union[str, Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_A : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_A : List[Any] = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def a__ ( self ) -> Union[str, Any]:
_A : Dict = DebertaVaTokenizer(_A )
_A : Union[str, Any] = tokenizer.encode("""sequence builders""" )
_A : Any = tokenizer.encode("""multi-sequence build""" )
_A : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
_A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def a__ ( self ) -> List[Any]:
_A : Any = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
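Outside the test harness, the tokenizer behavior exercised above can be reproduced roughly like this (network access and the public checkpoint are assumed; the exact `<unk>` placement depends on the vocabulary, so no output is asserted):

from transformers import DebertaV2Tokenizer

tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
ids = tokenizer.encode("I was born in 92000, and this is falsé.", add_special_tokens=False)
print(tokenizer.convert_ids_to_tokens(ids))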
| 307 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """simple docstring"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """simple docstring"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
def list_str(values):
    """simple docstring"""
    return values.split(",")
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
args = parser.parse_args()
from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
selected_warnings = extract_warnings(args.output_dir, args.targets)
selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
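Example invocations of the script above; the run id, token, and script path are placeholders:

# Download artifacts from a CI run, then collect the selected warnings:
#   python utils/extract_warnings.py --workflow_run_id 123456789 \
#       --output_dir warnings_out --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning,FutureWarning
#
# Inside a GitHub Action, after actions/download-artifact@v3 has filled --output_dir:
#   python utils/extract_warnings.py --workflow_run_id 123456789 --output_dir artifacts --from_gh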
| 74 | 0 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = "aab"
pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 591 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = 42
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ):
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE : Dict = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim
__SCREAMING_SNAKE_CASE : Tuple = in_channels
__SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A )
# 3. Define transformers blocks
__SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , )
for d in range(_A )
] )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A )
def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape
__SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames
__SCREAMING_SNAKE_CASE : Dict = hidden_states
__SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A )
__SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A )
__SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A )
__SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A )
# 2. Blocks
for block in self.transformer_blocks:
__SCREAMING_SNAKE_CASE : Optional[Any] = block(
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , )
# 3. Output
__SCREAMING_SNAKE_CASE : Any = self.proj_out(_A )
__SCREAMING_SNAKE_CASE : List[str] = (
hidden_states[None, None, :]
.reshape(_A , _A , _A , _A , _A )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_A )
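A hedged smoke test written against `diffusers`' `TransformerTemporalModel`, which the block above mirrors; `in_channels` is chosen so the default 32-group norm divides it evenly:

import torch
from diffusers.models import TransformerTemporalModel

model = TransformerTemporalModel(num_attention_heads=4, attention_head_dim=8, in_channels=32, num_layers=1)
hidden_states = torch.randn(2 * 4, 32, 8, 8)  # (batch * num_frames, channels, height, width)
output = model(hidden_states, num_frames=4).sample
print(output.shape)  # torch.Size([8, 32, 8, 8]); the residual connection preserves the input shape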
| 74 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class __snake_case :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.head == self.tail
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
self.data.append(_A )
lowerCAmelCase__ = self.tail + 1
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.data[self.head]
lowerCAmelCase__ = self.head + 1
return ret
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.tail - self.head
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class __snake_case :
def __init__( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = data
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = 1
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.data
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.left
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.right
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.height
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = data
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = node
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = node
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = height
def UpperCAmelCase_ ( snake_case__ ) -> str:
"""simple docstring"""
if node is None:
return 0
return node.get_height()
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Optional[int]:
"""simple docstring"""
if a > b:
return a
return b
def UpperCAmelCase_ ( snake_case__ ) -> Any:
"""simple docstring"""
print('left rotation node:' , node.get_data() )
lowerCAmelCase__ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(snake_case__ )
lowerCAmelCase__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(snake_case__ )
lowerCAmelCase__ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(snake_case__ )
return ret
def UpperCAmelCase_ ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
print('right rotation node:' , node.get_data() )
lowerCAmelCase__ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(snake_case__ )
lowerCAmelCase__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(snake_case__ )
lowerCAmelCase__ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(snake_case__ )
return ret
def UpperCAmelCase_ ( snake_case__ ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(snake_case__ ) )
return right_rotation(snake_case__ )
def UpperCAmelCase_ ( snake_case__ ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(snake_case__ ) )
return left_rotation(snake_case__ )
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if node is None:
return MyNode(snake_case__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , snake_case__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
lowerCAmelCase__ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowerCAmelCase__ = right_rotation(snake_case__ )
else:
lowerCAmelCase__ = lr_rotation(snake_case__ )
else:
node.set_right(insert_node(node.get_right() , snake_case__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
lowerCAmelCase__ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowerCAmelCase__ = rl_rotation(snake_case__ )
else:
lowerCAmelCase__ = left_rotation(snake_case__ )
lowerCAmelCase__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(snake_case__ )
return node
def UpperCAmelCase_ ( snake_case__ ) -> List[Any]:
"""simple docstring"""
while True:
lowerCAmelCase__ = root.get_right()
if right_child is None:
break
lowerCAmelCase__ = right_child
return root.get_data()
def UpperCAmelCase_ ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
while True:
lowerCAmelCase__ = root.get_left()
if left_child is None:
break
lowerCAmelCase__ = left_child
return root.get_data()
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = root.get_left()
lowerCAmelCase__ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
lowerCAmelCase__ = get_left_most(snake_case__ )
root.set_data(snake_case__ )
root.set_right(del_node(snake_case__ , snake_case__ ) )
elif left_child is not None:
lowerCAmelCase__ = left_child
elif right_child is not None:
lowerCAmelCase__ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(snake_case__ , snake_case__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(snake_case__ , snake_case__ ) )
if get_height(snake_case__ ) - get_height(snake_case__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
lowerCAmelCase__ = left_rotation(snake_case__ )
else:
lowerCAmelCase__ = rl_rotation(snake_case__ )
elif get_height(snake_case__ ) - get_height(snake_case__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
lowerCAmelCase__ = right_rotation(snake_case__ )
else:
lowerCAmelCase__ = lr_rotation(snake_case__ )
lowerCAmelCase__ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(snake_case__ )
return root
class __snake_case :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase__ = None
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return get_height(self.root )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
print('insert:' + str(_A ) )
lowerCAmelCase__ = insert_node(self.root ,_A )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
print('delete:' + str(_A ) )
if self.root is None:
print('Tree is empty!' )
return
lowerCAmelCase__ = del_node(self.root ,_A )
def __str__( self ,): # a level traversale, gives a more intuitive look on the tree
"""simple docstring"""
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = MyQueue()
q.push(self.root )
lowerCAmelCase__ = self.get_height()
if layer == 0:
return output
lowerCAmelCase__ = 0
while not q.is_empty():
lowerCAmelCase__ = q.pop()
lowerCAmelCase__ = ''' ''' * int(math.pow(2 ,layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(_A )
q.push(_A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
lowerCAmelCase__ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 ,_A ) - 1:
lowerCAmelCase__ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_lowerCAmelCase : List[Any] = AVLtree()
_lowerCAmelCase : Union[str, Any] = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 193 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = object_name.split('''.''' )
__SCREAMING_SNAKE_CASE : str = 0
# First let's find the module where our object lives.
__SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ):
i += 1
if i < len(snake_case ):
__SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] )
if i >= len(snake_case ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.readlines()
# Now let's find the class / func in the code!
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(snake_case ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__SCREAMING_SNAKE_CASE : List[Any] = line_index
while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index]
return "".join(snake_case )
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = code.split('''\n''' )
__SCREAMING_SNAKE_CASE : Dict = 0
while idx < len(snake_case ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(snake_case ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0
if has_indent:
__SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}'''
__SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code: stop when the indentation diminishes or on an `# End copy` comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, apply the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """simple docstring"""
    # `DIFFUSERS_PATH` is assumed to be defined near the top of this script (not shown in this excerpt).
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
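    # --- Illustrative sketch (added; not part of the original utility): how a replace
    # --- pattern such as "with Bert->MobileBert" rewrites the copied code before diffing.
    # --- `_obj1`/`_obj2` mirror the groups captured by `_re_replace_pattern` above.
    _demo_code = "class BertModel:\n    BERT_INPUTS = None\n"
    for _obj1, _obj2 in [("Bert", "MobileBert")]:
        _demo_code = re.sub(_obj1, _obj2, _demo_code)  # plain replacement
        _demo_code = re.sub(_obj1.upper(), _obj2.upper(), _demo_code)  # the "all-casing" option
    print(_demo_code)  # class MobileBertModel: / MOBILEBERT_INPUTS = None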
| 74 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
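# Minimal sketch (illustrative addition, not the real `_LazyModule`): attribute access
# triggers the actual submodule import on first use, so importing the package stays cheap.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported object to the submodule that defines it
        self._object_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}

    def __getattr__(self, attr):
        module_name = self._object_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)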
| 369 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
    def test_stable_diffusion_flax(self):
        """simple docstring"""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        """simple docstring"""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
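# Context sketch (added for illustration; the standard Flax data-parallel idiom, not part
# of the original test file): `replicate` copies the params to every device and `shard`
# adds a leading device axis to the batch, which is what lets `jit=True` pmap the loop.
if is_flax_available():
    _n = jax.device_count()
    _batch = jnp.zeros((_n * 2, 77), dtype=jnp.int32)  # two prompts per device
    _sharded = _batch.reshape(_n, -1, *_batch.shape[1:])  # what `shard` does under the hood
    assert _sharded.shape == (_n, 2, 77)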
| 74 | 0 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
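    # Cross-check (illustrative addition): the string result agrees with Python's native
    # bitwise `&` once parsed back into an integer.
    assert int(binary_and(25, 32), 2) == (25 & 32)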
| 229 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowercase_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        return {}, {}, {}

    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
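# Usage sketch (illustrative addition; assumes the "Intel/dpt-large" checkpoint, but any
# depth-estimation model id works). Downloads weights on first run.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor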
| 572 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        """simple docstring"""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        """simple docstring"""
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
self.assertListEqual(_A , _A )
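# Illustrative sketch (added; not part of the test suite): the greedy longest-match-first
# loop below is why WordPiece splits "unwanted" into ["un", "##want", "##ed"].
if __name__ == "__main__":
    _vocab = {"[UNK]", "un", "##want", "##ed"}
    _word, _start, _pieces = "unwanted", 0, []
    while _start < len(_word):
        _end, _cur = len(_word), None
        while _start < _end:
            _sub = ("##" if _start > 0 else "") + _word[_start:_end]
            if _sub in _vocab:
                _cur = _sub
                break
            _end -= 1
        _pieces.append(_cur or "[UNK]")
        _start = _end if _cur else len(_word)
    print(_pieces)  # ['un', '##want', '##ed']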
| 74 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "convbert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
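# Usage sketch (illustrative addition): the ONNX input spec maps each input tensor to its
# dynamic axes, which an exporter uses to allow variable batch/sequence sizes.
if __name__ == "__main__":
    _onnx_config = ConvBertOnnxConfig(ConvBertConfig())
    print(dict(_onnx_config.inputs))
    # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': ..., 'token_type_ids': ...}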
| 146 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowercase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74 | 0 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive integers that are both an n-digit number and an n-th power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
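    # Worked instance (illustrative addition): 16807 = 7**5 is a five-digit fifth power,
    # so it is counted; 10**n always has n + 1 digits, which is why bases stop at 9.
    assert len(str(7**5)) == 5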
| 301 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """simple docstring"""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
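# Usage sketch (illustrative paths): this builder is what backs
# `load_dataset("parquet", ...)` in the `datasets` library.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files={"train": "path/to/train-*.parquet"}, split="train")
    print(ds.features)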
| 74 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
'''simple docstring'''
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """simple docstring"""
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowercase = doctest.register_optionflag("""IGNORE_RESULT""")
lowercase = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    """simple docstring"""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
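# Illustrative doctest (added sketch): with the custom checker registered above, the
# IGNORE_RESULT flag lets a doctest execute a line without comparing its output, e.g.:
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123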
| 240 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
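    # Context note (Project Euler 144, inferred from the constants above): the beam bounces
    # inside the ellipse 4x^2 + y^2 = 100 until it exits through the gap at the top.
    print(next_point(1.4, -9.6, (10.1 + 9.6) / (0.0 - 1.4)))  # first point of incidence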
| 74 | 0 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
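    # Sanity sketch (illustrative addition): the same recurrence seeded with
    # min_block_length = 3 reproduces the Project Euler 114 value F(3, 7) = 17.
    def _fill_count(min_block_length, n_max):
        fills = [1] * min_block_length
        for n in range(min_block_length, n_max + 1):
            fills.append(1)
            for block_length in range(min_block_length, n + 1):
                for block_start in range(n - block_length):
                    fills[n] += fills[n - block_start - block_length - 1]
                fills[n] += 1
        return fills[n_max]

    assert _fill_count(3, 7) == 17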
| 195 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 74 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):  # name chosen for unittest collection
        """simple docstring"""
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
"""simple docstring"""
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
# forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """simple docstring"""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion(self):
        """simple docstring"""
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 453 |
def z_function(input_str: str) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i, z_result, s):
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern string,
        # this index marks the starting position of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
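    # Worked example (illustrative addition): count occurrences of "ab" in "abab".
    # z_function("ab" + "abab") == [0, 0, 4, 0, 2, 0]; two entries >= len("ab") -> 2 matches.
    assert find_pattern("ab", "abab") == 2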
| 74 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
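# Usage sketch (illustrative addition): composing the two sub-configs into an AlignConfig,
# mirroring `from_text_vision_configs` above; all values are the defaults.
if __name__ == "__main__":
    _text = AlignTextConfig()
    _vision = AlignVisionConfig()
    _config = AlignConfig.from_text_vision_configs(_text, _vision)
    print(_config.to_dict()["model_type"])  # align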
| 307 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
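
# Added, hedged note: the _LazyModule swap above defers every submodule import
# until first attribute access. The same effect can be had with PEP 562
# module-level __getattr__; a minimal standalone sketch (illustrative only,
# independent of transformers):
#
#     import importlib
#
#     _lazy = {"sqrt": "math", "ceil": "math"}
#
#     def __getattr__(name):
#         if name in _lazy:
#             return getattr(importlib.import_module(_lazy[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")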
| 74 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (RGBA input is converted to RGB, so 3 channels out)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
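
# Added, hedged usage sketch (not part of the original tests): running the image
# processor directly on a dummy image. Assumes transformers, Pillow, and torch
# are installed; the crop values mirror the defaults asserted above.
if __name__ == "__main__":
    demo_image = Image.fromarray(np.random.randint(255, size=(32, 32, 3), dtype=np.uint8))
    demo_processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
    print(demo_processor(demo_image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])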
| 591 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
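
# Added, hedged worked example of the patch-size parsing above: for
# "xclip-base-patch32", find("patch") returns 11, and the two characters after
# the substring are the patch size:
#     int("xclip-base-patch32"[11 + 5 : 11 + 5 + 2])  # -> 32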
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
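
# Added, hedged examples of mappings produced by rename_key:
#     "visual.conv1.weight"                 -> "vision_model.embeddings.patch_embedding.weight"
#     "transformer.resblocks.0.ln_1.weight" -> "text_model.encoder.layers.0.layer_norm1.weight"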
def convert_state_dict(orig_state_dict, config):
    # Note: the target key names below follow the rename_key scheme above
    # (resblocks -> encoder.layers) plus the standard q/k/v projection names.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
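
# Added, hedged standalone demo of the in_proj split used above: CLIP-style
# checkpoints store query/key/value as one fused (3*dim, dim) matrix, and
# slicing thirds recovers the separate projections.
def _demo_split_in_proj(dim=4):
    fused = torch.randn(3 * dim, dim)  # in_proj_weight
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v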
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_lowerCAmelCase : List[Any] = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | None = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test all possible segments against reduce() on the raw array."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
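
    # Added, hedged usage sketch: ad-hoc range queries against the trees built
    # above. query(l, r) is inclusive on both ends and runs in O(log n) per call.
    print(min_segment_tree.query(0, 5))  # smallest value in test_array[0..5]
    print(sum_segment_tree.query(2, 6))  # sum of test_array[2..6]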
| 193 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
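
# Added, hedged usage note: python-fire maps positional CLI arguments onto the
# function signature, so
#     python minify.py SRC_DIR DEST_DIR 100
# (or, programmatically, minify("data/raw", "data/small", n=100)) writes the
# first 100 rstripped lines of every file in SRC_DIR into DEST_DIR.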
| 74 | 0 |
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations by rotating the list and recursing."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations via in-place backtracking (swap, recurse, swap back)."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
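
# Added, hedged sanity check: both implementations enumerate the same set of
# permutations, though not necessarily in the same order.
if __name__ == "__main__":
    assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))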
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
| 369 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers and may only be initialized from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint must not be a complete prefix of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
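
# Added, hedged usage sketch: stepping a DisjunctiveConstraint by hand, outside
# the test harness, mirroring the progressions above.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.completed, dc.current_seq)  # True [1, 2, 4]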
| 74 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
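
# Added, hedged usage sketch: encoding one question against several passages
# with the reader tokenizer above (downloads the pretrained vocab on first use).
if __name__ == "__main__":
    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions="What is love?",
        titles=["Haddaway", "Song"],
        texts=["'What Is Love' is a 1993 song.", "A song is a musical composition."],
        padding=True,
        return_tensors="pt",
    )
    print(encoded["input_ids"].shape)  # (n_passages, sequence_length)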
| 229 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """
    Generate a boolean mask for masked-image-modeling pretraining: a flattened
    grid of model patches where 1 marks a masked patch.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(config , '''decoder_type''' ):
config.decoder_type = '''simmim'''
# adapt config
model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
model_args.encoder_stride = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
elif model_args.model_name_or_path:
image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
IMAGE_PROCESSOR_TYPES = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
model = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
model = AutoModelForMaskedImageModeling.from_config(config )
if training_args.do_train:
column_names = ds['''train'''].column_names
else:
column_names = ds['''validation'''].column_names
if data_args.image_column_name is not None:
image_column_name = data_args.image_column_name
elif "image" in column_names:
image_column_name = '''image'''
elif "img" in column_names:
image_column_name = '''img'''
else:
image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
transforms = Compose(
[
Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
mask_generator = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(examples ):
examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
examples['''mask'''] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
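# `preprocess_images` attaches, per example, the transformed image and one fresh
# boolean patch mask from `mask_generator`; the collator at the top of this file
# stacks these into the `pixel_values` and `bool_masked_pos` tensors that the
# masked-image-modeling head consumes.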
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
ds['''validation'''] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics('''eval''' , metrics )
trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
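# Example invocation (model type, dataset, and paths are illustrative, not prescriptive):
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train --do_eval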
| 74 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( voltage : float, current : float, resistance : float ) -> dict[str, float]:
# Apply Ohm's law (V = I * R): exactly one of the three quantities must be
# passed as 0, and the function solves for that quantity.
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 572 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = '''data2vec-vision'''
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.use_mask_token = use_mask_token
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_bias = use_relative_position_bias
self.use_shared_relative_position_bias = use_shared_relative_position_bias
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.use_mean_pooling = use_mean_pooling
# decode head attributes (semantic segmentation)
self.out_indices = out_indices
self.pool_scales = pool_scales
# auxiliary head attributes (semantic segmentation)
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig ( OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def inputs ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def atol_for_validation ( self ):
"""simple docstring"""
return 1e-4
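# Usage sketch (relies on the class names restored above):
#   config = Data2VecVisionConfig()
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   list(onnx_config.inputs)         ->  ["pixel_values"]
#   onnx_config.atol_for_validation  ->  1e-4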
| 74 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
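# The core pattern in miniature (a sketch; `model`, `optimizer`, `accelerator`,
# and the dataloader are assumed to exist already):
#
#   with LocalSGD(accelerator=accelerator, model=model,
#                 local_sgd_steps=8, enabled=True) as local_sgd:
#       for batch in train_dataloader:
#           with accelerator.accumulate(model):
#               loss = model(**batch).loss
#               accelerator.backward(loss)
#               optimizer.step()
#               optimizer.zero_grad()
#           local_sgd.step()  # parameters are synchronized every `local_sgd_steps` batches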
_UpperCamelCase = 16
_UpperCamelCase = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
datasets = load_dataset('glue' , 'mrpc' )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config , args ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
config['''num_epochs'''] = 2
# New Code #
gradient_accumulation_steps = int(args.gradient_accumulation_steps )
local_sgd_steps = int(args.local_sgd_steps )
# Initialize accelerator
accelerator = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['''lr''']
num_epochs = int(config['num_epochs'] )
seed = int(config['seed'] )
batch_size = int(config['batch_size'] )
metric = evaluate.load('glue' , 'mrpc' )
set_seed(seed )
train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device )
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() , lr=lr )
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
for epoch in range(num_epochs ):
model.train()
with LocalSGD(
accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(model ):
output = model(**batch )
loss = output.loss
accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , eval_metric )
def main():
parser = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=int , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
args = parser.parse_args()
config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(config , args )
if __name__ == "__main__":
main()
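# Example invocation (script name illustrative):
#   accelerate launch local_sgd_example.py --mixed_precision fp16 \
#       --gradient_accumulation_steps 2 --local_sgd_steps 8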
| 146 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = DistilBertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = DistilBertForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = DistilBertForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = DistilBertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = DistilBertForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
config.num_choices = self.num_choices
model = DistilBertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_pruning = True
test_resize_embeddings = True
test_resize_position_embeddings = True
def setUp( self ):
"""simple docstring"""
self.model_tester = DistilBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_distilbert_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
def test_for_masked_lm( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
def test_for_question_answering( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
def test_for_multiple_choice( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DistilBertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@slow
@require_torch_gpu
def test_torchscript_device_change( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config )
inputs_dict = self._prepare_for_class(inputs_dict , model_class )
traced_model = torch.jit.trace(
model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model , os.path.join(tmp , '''traced_model.pt''' ) )
loaded = torch.jit.load(os.path.join(tmp , '''traced_model.pt''' ) , map_location=torch_device )
loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class DistilBertModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_no_head_absolute_embedding( self ):
"""simple docstring"""
model = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
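# These tests are typically run with pytest, e.g. (path illustrative):
#   python -m pytest tests/models/distilbert/test_modeling_distilbert.py -k "distilbert"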
| 74 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester ( ConfigTester ):
def create_and_test_config_common_properties( self ):
"""simple docstring"""
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config , """embed_dim""" ) )
self.parent.assertTrue(hasattr(config , """num_heads""" ) )
class TFCvtModelTester :
def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.num_channels = num_channels
self.embed_dim = embed_dim
self.num_heads = num_heads
self.stride_kv = stride_kv
self.depth = depth
self.cls_token = cls_token
self.attention_drop_rate = attention_drop_rate
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
# create a random int32 tensor of given shape
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , pixel_values , labels ):
"""simple docstring"""
model = TFCvtModel(config=config )
result = model(pixel_values , training=False )
image_size = (self.image_size, self.image_size)
height, width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = TFCvtForImageClassification(config )
result = model(pixel_values , labels=labels , training=False )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
test_onnx = False
def setUp( self ):
"""simple docstring"""
self.model_tester = TFCvtModelTester(self )
self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
def test_config( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def test_attention_outputs( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def test_model_common_attributes( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def test_dataset_conversion( self ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def test_keras_fit( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def test_keras_fit_mixed_precision( self ):
"""simple docstring"""
policy = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(policy )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def test_forward_signature( self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_hidden_states_output( self ):
"""simple docstring"""
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depth )
self.assertEqual(len(hidden_states ) , expected_num_layers )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['''output_hidden_states'''] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def test_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_image_classification( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCvtModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest ( unittest.TestCase ):
@cached_property
def default_image_processor( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def test_inference_image_classification_head( self ):
"""simple docstring"""
model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="""tf""" )
# forward pass
outputs = model(**inputs )
# verify the logits
expected_shape = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 301 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
lowercase_ = """3.0.12"""
_logger = None
def logger():
"""simple docstring"""
global _logger
_logger = _logger or logging.getLogger(__name__ )
return _logger
class Timeout ( TimeoutError ):
"""simple docstring"""
def __init__( self , lock_file ):
"""simple docstring"""
self.lock_file = lock_file
return None
def __str__( self ):
"""simple docstring"""
temp = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class _Acquire_ReturnProxy :
"""simple docstring"""
def __init__( self , lock ):
"""simple docstring"""
self.lock = lock
return None
def __enter__( self ):
"""simple docstring"""
return self.lock
def __exit__( self , exc_type , exc_value , traceback ):
"""simple docstring"""
self.lock.release()
return None
class BaseFileLock :
"""simple docstring"""
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
"""simple docstring"""
max_filename_length = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
# The path to the lock file.
self._lock_file = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
self._lock_file_fd = None
# The default timeout value.
self._timeout = timeout
# We use this lock primarily for the lock counter.
self._thread_lock = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
self._lock_counter = 0
return None
@property
def lock_file( self ):
"""simple docstring"""
return self._lock_file
@property
def timeout( self ):
"""simple docstring"""
return self._timeout
@timeout.setter
def timeout( self , value ):
"""simple docstring"""
self._timeout = float(value )
return None
def _acquire( self ):
"""simple docstring"""
raise NotImplementedError()
def _release( self ):
"""simple docstring"""
raise NotImplementedError()
@property
def is_locked( self ):
"""simple docstring"""
return self._lock_file_fd is not None
def acquire( self , timeout=None , poll_intervall=0.05 ):
"""simple docstring"""
if timeout is None:
timeout = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lock_id = id(self )
lock_filename = self._lock_file
start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(poll_intervall )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def release( self , force=False ):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lock_id = id(self )
lock_filename = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
self._lock_counter = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : int ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ):
"""simple docstring"""
self.release()
return None
def __del__( self : int ):
"""simple docstring"""
self.release(force=True )
return None
def hash_filename_if_too_long( self , path , max_length ):
"""simple docstring"""
filename = os.path.basename(path )
if len(filename ) > max_length and max_length > 0:
dirname = os.path.dirname(path )
hashed_filename = str(hash(filename ) )
filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(dirname , filename )
else:
return path
class WindowsFileLock ( BaseFileLock ):
"""simple docstring"""
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def _acquire( self ):
"""simple docstring"""
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
fd = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
try:
msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(fd )
else:
self._lock_file_fd = fd
return None
def _release( self ):
"""simple docstring"""
fd = self._lock_file_fd
self._lock_file_fd = None
msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock ( BaseFileLock ):
"""simple docstring"""
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
"""simple docstring"""
max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
def _acquire( self ):
"""simple docstring"""
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
fd = os.open(self._lock_file , open_mode )
try:
fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(fd )
else:
self._lock_file_fd = fd
return None
def _release( self ):
"""simple docstring"""
fd = self._lock_file_fd
self._lock_file_fd = None
fcntl.flock(fd , fcntl.LOCK_UN )
os.close(fd )
return None
class SoftFileLock ( BaseFileLock ):
"""simple docstring"""
def _acquire( self ):
"""simple docstring"""
open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
fd = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
self._lock_file_fd = fd
return None
def _release( self ):
"""simple docstring"""
os.close(self._lock_file_fd )
self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
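# Typical usage sketch (lock path illustrative); the lock is re-entrant and is
# released when the outermost context exits, even on error:
#   lock = FileLock("/tmp/my_resource.lock", timeout=10)
#   with lock:
#       ...  # critical section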
| 74 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint, config ):
'''simple docstring'''
vae_state_dict = checkpoint
new_checkpoint = {}
new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
num_down_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
down_blocks = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(num_down_blocks )
}
# Retrieves the keys for the decoder up blocks only
num_up_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
up_blocks = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(num_up_blocks )
}
for i in range(num_down_blocks ):
resnets = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
new_checkpoint[F"""encoder.down_blocks.{i}.downsamplers.0.conv.weight"""] = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
new_checkpoint[F"""encoder.down_blocks.{i}.downsamplers.0.conv.bias"""] = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': F"""down.{i}.block""", '''new''': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_resnets = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
num_mid_res_blocks = 2
for i in range(1, num_mid_res_blocks + 1 ):
resnets = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_attentions = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
paths = renew_vae_attention_paths(mid_attentions )
meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
conv_attn_to_linear(new_checkpoint )
for i in range(num_up_blocks ):
block_id = num_up_blocks - 1 - i
resnets = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
new_checkpoint[F"""decoder.up_blocks.{i}.upsamplers.0.conv.weight"""] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
new_checkpoint[F"""decoder.up_blocks.{i}.upsamplers.0.conv.bias"""] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': F"""up.{block_id}.block""", '''new''': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_resnets = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
num_mid_res_blocks = 2
for i in range(1, num_mid_res_blocks + 1 ):
resnets = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_attentions = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
paths = renew_vae_attention_paths(mid_attentions )
meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
conv_attn_to_linear(new_checkpoint )
return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path: str, output_path: str, ):
'''simple docstring'''
r = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
io_obj = io.BytesIO(r.content )
original_config = OmegaConf.load(io_obj )
image_size = 512
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
checkpoint = {}
with safe_open(checkpoint_path , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
checkpoint[key] = f.get_tensor(key )
else:
checkpoint = torch.load(checkpoint_path , map_location=device )['''state_dict''']
# Convert the VAE model.
vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
vae = AutoencoderKL(**vae_config )
vae.load_state_dict(converted_vae_checkpoint )
vae.save_pretrained(output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
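# Example invocation (paths illustrative):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers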
| 240 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
import bs4
from bs4 import BeautifulSoup
lowercase_ = logging.get_logger(__name__)
class MarkupLMFeatureExtractor ( FeatureExtractionMixin ):
"""simple docstring"""
def __init__( self : Optional[Any] , **_A : Dict ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**_A )
def xpath_soup( self , element ):
"""simple docstring"""
xpath_tags = []
xpath_subscripts = []
child = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
siblings = parent.find_all(child.name , recursive=False )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def get_three_from_single( self , html_string ):
"""simple docstring"""
html_code = BeautifulSoup(html_string , '''html.parser''' )
all_doc_strings = []
stringaxtag_seq = []
stringaxsubs_seq = []
for element in html_code.descendants:
if type(element ) == bs4.element.NavigableString:
if type(element.parent ) != bs4.element.Tag:
continue
text_in_this_tag = html.unescape(element ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(text_in_this_tag )
xpath_tags, xpath_subs = self.xpath_soup(element )
stringaxtag_seq.append(xpath_tags )
stringaxsubs_seq.append(xpath_subs )
if len(all_doc_strings ) != len(stringaxtag_seq ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(all_doc_strings ) != len(stringaxsubs_seq ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for tagname, subs in zip(_A , _A ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self : Optional[int] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = False
# Check that strings has a valid type
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = True
elif isinstance(_A , (list, tuple) ):
if len(_A ) == 0 or isinstance(html_strings[0] , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
F'''but is of type {type(_A )}.''' )
__SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) )
if not is_batched:
__SCREAMING_SNAKE_CASE : Dict = [html_strings]
# Get nodes + xpaths
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Tuple = []
for html_string in html_strings:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A )
nodes.append(_A )
__SCREAMING_SNAKE_CASE : Dict = []
for node, tag_list, sub_list in zip(_A , _A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A )
xpath_strings.append(_A )
xpaths.append(_A )
# return as Dict
__SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths}
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs
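# Hedged usage sketch (illustrative addition, not part of the original module); the HTML
# snippet is an assumption:
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello</p></body></html>")
#   # encoding["nodes"]  -> [["Hello"]]
#   # encoding["xpaths"] -> [["/html/body/p"]]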
| 74 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 195 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
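    # Hedged usage note (illustrative addition): with the defaults above, something like
    #   python convert_levit_checkpoint.py --model_name levit-128S
    # converts a single checkpoint into levit-dump-folder/; the script filename is an assumption.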
| 74 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root=None):
        self.root = root

    def __str__(self) -> str:
        """Return a string of all the Nodes using in order traversal"""
        return str(self.root)

    def __reassign_nodes(self, node, new_children) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert a new node in Binary Search Tree with value label"""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
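# Hedged usage sketch (illustrative addition, not in the original module): finding the
# k-th smallest value via the in-order helper above.
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 10, 1, 6)
#   print(t.find_kth_smallest(2, t.root))  # -> 3, the 2nd smallest inserted value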
| 453 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
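# Hedged illustration (added, module path is an assumption): with the lazy structure above,
#   from transformers.models.falcon import FalconConfig
# resolves cheaply, and the heavy torch-backed modeling module is only imported when a
# modeling class such as FalconModel is first accessed.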
| 74 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_snake_case = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},)
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},)
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },)

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
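# Hedged usage sketch (illustrative addition): constructing the arguments and reading the
# derived device properties; the field values are assumptions.
#
#   benchmark_args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=True)
#   print(benchmark_args.is_gpu, benchmark_args.n_gpu)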
| 307 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 74 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 591 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """The output of a temporal transformer forward pass."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
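# Hedged shape walkthrough (illustrative addition, not in the original file); sizes are
# assumptions:
#
#   model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=256)
#   frames = torch.randn(2 * 4, 256, 16, 16)   # (batch * num_frames, C, H, W)
#   out = model(frames, num_frames=4).sample   # same shape as the input, attention runs across frames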
| 74 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
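# Hedged usage sketch (illustrative addition): instantiating a small config; the values
# below are assumptions, not defaults taken from a released checkpoint.
#
#   config = SwinConfig(embed_dim=48, depths=[2, 2], num_heads=[3, 6])
#   config.hidden_size  # -> 96, i.e. 48 * 2 ** (len(depths) - 1)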
| 193 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
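    # Hedged illustration (added): the marker this script scans for looks like the comment
    # below; the class names are assumptions.
    #
    #   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock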
| 74 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=DummyObject):  # placeholder name; the concrete pipeline name is not recoverable here
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Any , *__snake_case : List[Any] , **__snake_case : Optional[int] )-> List[Any]:
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__snake_case : List[str] , **__snake_case : Union[str, Any] )-> Tuple:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__snake_case : Any , **__snake_case : Optional[int] )-> Union[str, Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : List[Any] , *__snake_case : str , **__snake_case : Union[str, Any] )-> Optional[int]:
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *__snake_case : List[Any] , **__snake_case : Optional[int] )-> List[Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *__snake_case : Dict , **__snake_case : int )-> Any:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *__snake_case : Union[str, Any] , **__snake_case : Optional[int] )-> Tuple:
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : str , *__snake_case : Tuple , **__snake_case : Any )-> Tuple:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Dict , *__snake_case : int , **__snake_case : Optional[Any] )-> int:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *__snake_case : int , **__snake_case : List[Any] )-> str:
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__snake_case : Union[str, Any] , **__snake_case : Optional[int] )-> Optional[Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *__snake_case : Any , **__snake_case : List[Any] )-> List[Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *__snake_case : Any , **__snake_case : Any )-> Union[str, Any]:
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__snake_case : List[Any] , **__snake_case : Any )-> List[str]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__snake_case : str , **__snake_case : str )-> Optional[Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *__snake_case : Union[str, Any] , **__snake_case : int )-> str:
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *__snake_case : int , **__snake_case : Optional[int] )-> Union[str, Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCAmelCase ( cls : Any , *__snake_case : int , **__snake_case : List[Any] )-> Union[str, Any]:
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 369 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 74 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
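# Hedged usage sketch (illustrative addition); the image URL is an assumption:
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification")
#   classifier("https://example.com/cat.png", top_k=3)
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score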
| 229 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : Union[str, Any] ) -> Dict:
if n == 1 or not isinstance(lowerCamelCase__, lowerCamelCase__ ):
return 0
elif n == 2:
return 1
else:
_SCREAMING_SNAKE_CASE : Any = [0, 1]
for i in range(2, n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _lowerCAmelCase ( lowerCamelCase__ : Optional[Any] ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Optional[int] = 2
while digits < n:
index += 1
_SCREAMING_SNAKE_CASE : List[Any] = len(str(fibonacci(lowerCamelCase__ ) ) )
return index
def _lowerCAmelCase ( lowerCamelCase__ : Tuple = 1_0_0_0 ) -> Optional[Any]:
return fibonacci_digits_index(lowerCamelCase__ )
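# Worked example (added for illustration): solution(3) == 12, because F(12) = 144 is the
# first Fibonacci term with 3 digits (F(11) = 89 still has only two).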
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 572 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
__SCREAMING_SNAKE_CASE : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有''']
__SCREAMING_SNAKE_CASE : int = ''''''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE : List[Any] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 74 | 0 |
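The offset-mapping assertions above exercise a feature that only the Rust-backed "fast" tokenizers provide. A minimal sketch of the same call outside a test harness, assuming network access to download `bert-base-uncased` (any WordPiece fast tokenizer behaves the same way):

```python
from transformers import BertTokenizerFast

# Fast (Rust-backed) tokenizers can return character offsets per token.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer("A naive sentence.", return_offsets_mapping=True)

for token, (start, end) in zip(
    tokenizer.convert_ids_to_tokens(encoding["input_ids"]),
    encoding["offset_mapping"],
):
    # Special tokens like [CLS]/[SEP] map to the empty span (0, 0).
    print(token, (start, end))
```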
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase :
'''simple docstring'''
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
        UpperCAmelCase__ = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
UpperCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
        UpperCAmelCase__ = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
UpperCAmelCase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        UpperCAmelCase__ = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
UpperCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
        UpperCAmelCase__ = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
UpperCAmelCase__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ = self.get_dummy_inputs(_A )
UpperCAmelCase__ = inputs['''prompt''']
UpperCAmelCase__ = inputs['''generator''']
UpperCAmelCase__ = inputs['''num_inference_steps''']
UpperCAmelCase__ = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase__ = inputs['''image''']
else:
UpperCAmelCase__ = None
if "mask_image" in inputs:
UpperCAmelCase__ = inputs['''mask_image''']
else:
UpperCAmelCase__ = None
if "original_image" in inputs:
UpperCAmelCase__ = inputs['''original_image''']
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase__ = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase__ = image
if mask_image is not None:
UpperCAmelCase__ = mask_image
if original_image is not None:
UpperCAmelCase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase__ = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase__ = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase__ = self.get_dummy_inputs(_A )
UpperCAmelCase__ = inputs['''generator''']
UpperCAmelCase__ = inputs['''num_inference_steps''']
UpperCAmelCase__ = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase__ = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase__ = image
if mask_image is not None:
UpperCAmelCase__ = mask_image
if original_image is not None:
UpperCAmelCase__ = original_image
UpperCAmelCase__ = pipe_loaded(**_A )[0]
UpperCAmelCase__ = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ = self.get_dummy_inputs(_A )
UpperCAmelCase__ = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase__ = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase__ = self.get_dummy_inputs(_A )
UpperCAmelCase__ = pipe_loaded(**_A )[0]
UpperCAmelCase__ = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 146 |
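Both tests above follow the same save/reload-and-compare pattern. A schematic helper distilling that pattern, assuming `pipe` is any already-constructed diffusers pipeline and `make_inputs` a callable returning deterministic call arguments (the helper name is hypothetical):

```python
import tempfile

import numpy as np

def assert_save_load_roundtrip(pipe, make_inputs, atol=1e-4):
    # make_inputs() must build freshly seeded inputs each time: a consumed
    # torch.Generator would give the second run different noise.
    expected = np.asarray(pipe(**make_inputs())[0])
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        reloaded = type(pipe).from_pretrained(tmpdir)
    actual = np.asarray(reloaded(**make_inputs())[0])
    assert np.abs(expected - actual).max() < atol
```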
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowercase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74 | 0 |
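The shim above keeps old imports working while steering users to the new class. A quick way to see it fire, assuming transformers with vision extras is installed:

```python
import warnings

from transformers import MobileViTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    MobileViTFeatureExtractor()  # instantiating the deprecated alias warns
    print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
```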
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
SCREAMING_SNAKE_CASE__ = {
'ctrl': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = {
'Pregnancy': 1_6_8_6_2_9,
'Christianity': 7_6_7_5,
'Explain': 1_0_6_4_2_3,
'Fitness': 6_3_4_4_0,
'Saving': 6_3_1_6_3,
'Ask': 2_7_1_7_1,
'Ass': 9_5_9_8_5,
'Joke': 1_6_3_5_0_9,
'Questions': 4_5_6_2_2,
'Thoughts': 4_9_6_0_5,
'Retail': 5_2_3_4_2,
'Feminism': 1_6_4_3_3_8,
'Writing': 1_1_9_9_2,
'Atheism': 1_9_2_2_6_3,
'Netflix': 4_8_6_1_6,
'Computing': 3_9_6_3_9,
'Opinion': 4_3_2_1_3,
'Alone': 4_4_9_6_7,
'Funny': 5_8_9_1_7,
'Gaming': 4_0_3_5_8,
'Human': 4_0_8_8,
'India': 1_3_3_1,
'Joker': 7_7_1_3_8,
'Diet': 3_6_2_0_6,
'Legal': 1_1_8_5_9,
'Norman': 4_9_3_9,
'Tip': 7_2_6_8_9,
'Weight': 5_2_3_4_3,
'Movies': 4_6_2_7_3,
'Running': 2_3_4_2_5,
'Science': 2_0_9_0,
'Horror': 3_7_7_9_3,
'Confession': 6_0_5_7_2,
'Finance': 1_2_2_5_0,
'Politics': 1_6_3_6_0,
'Scary': 1_9_1_9_8_5,
'Support': 1_2_6_5_4,
'Technologies': 3_2_5_1_6,
'Teenage': 6_6_1_6_0,
'Event': 3_2_7_6_9,
'Learned': 6_7_4_6_0,
'Notion': 1_8_2_7_7_0,
'Wikipedia': 3_7_5_8_3,
'Books': 6_6_6_5,
'Extract': 7_6_0_5_0,
'Confessions': 1_0_2_7_0_1,
'Conspiracy': 7_5_9_3_2,
'Links': 6_3_6_7_4,
'Narcissus': 1_5_0_4_2_5,
'Relationship': 5_4_7_6_6,
'Relationships': 1_3_4_7_9_6,
'Reviews': 4_1_6_7_1,
'News': 4_2_5_6,
'Translation': 2_6_8_2_0,
'multilingual': 1_2_8_4_0_6,
}
def lowercase__ ( __UpperCamelCase )-> Dict:
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(__UpperCamelCase )
return pairs
class a_ ( lowerCAmelCase__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = CONTROL_CODES
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
super().__init__(unk_token=_A , **_A )
with open(_A , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(_A )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(_A , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) )
UpperCamelCase = {}
@property
def A__ ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def A__ ( self ) -> Any:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(_A )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
UpperCamelCase = get_pairs(_A )
if not pairs:
return token
while True:
            UpperCamelCase = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(_A ):
try:
UpperCamelCase = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(_A )
UpperCamelCase = new_word
if len(_A ) == 1:
break
else:
UpperCamelCase = get_pairs(_A )
UpperCamelCase = '''@@ '''.join(_A )
UpperCamelCase = word[:-4]
UpperCamelCase = word
return word
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(R"""\S+\n?""" , _A )
for token in words:
split_tokens.extend(list(self.bpe(_A ).split(""" """ ) ) )
return split_tokens
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = ''' '''.join(_A ).replace("""@@ """ , """""" ).strip()
return out_string
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Optional[Any]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + """\n""" )
UpperCamelCase = 0
with open(_A , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase = token_index
writer.write(""" """.join(_A ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301 |
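The `bpe` method above implements greedy byte-pair merging: repeatedly find the adjacent symbol pair with the lowest merge rank and fuse it until no ranked pair remains. A stripped-down, self-contained sketch of the same loop with a toy merge table (the `</w>` end-of-word marker and the cache are omitted):

```python
def bpe_merge(token, bpe_ranks):
    # Greedy BPE: repeatedly merge the adjacent pair with the lowest rank.
    word = list(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break  # no learned merge applies anymore
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word

ranks = {("l", "o"): 0, ("lo", "w"): 1}
print(bpe_merge("low", ranks))  # ['low']
```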
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase_ = 1_00_00
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase_ = ParquetConfig
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE : Tuple = data_files
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__SCREAMING_SNAKE_CASE : int = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_A ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
break
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
return splits
def UpperCAmelCase__ ( self : str , _A : pa.Table ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
raise
| 74 | 0 |
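The `_generate_tables` loop above streams each Parquet file in record batches rather than loading it whole. The core pyarrow calls can be tried in isolation; a small sketch that writes a temporary file and reads it back four rows at a time:

```python
import tempfile

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": list(range(10)), "value": [i * i for i in range(10)]})

with tempfile.NamedTemporaryFile(suffix=".parquet") as tmp:
    pq.write_table(table, tmp.name)
    parquet_file = pq.ParquetFile(tmp.name)
    # Stream back 4 rows at a time, projecting a single column.
    for batch_idx, record_batch in enumerate(
        parquet_file.iter_batches(batch_size=4, columns=["id"])
    ):
        chunk = pa.Table.from_batches([record_batch])
        print(batch_idx, chunk.num_rows, chunk.column_names)
```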
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
_A : int = ['''pixel_values''']
def __init__( self : Optional[Any] , _a : bool = True , _a : Dict[str, int] = None , _a : float = None , _a : PILImageResampling = PILImageResampling.BILINEAR , _a : bool = True , _a : Union[int, float] = 1 / 255 , _a : bool = True , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[float, List[float]]] = None , **_a : Dict , ):
super().__init__(**_A )
UpperCamelCase__ = size if size is not None else {'''shortest_edge''': 384}
UpperCamelCase__ = get_size_dict(_A , default_to_square=_A )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCamelCase__ = crop_pct if crop_pct is not None else 224 / 256
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Union[str, Any] , _a : np.ndarray , _a : Dict[str, int] , _a : float , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : Optional[Union[str, ChannelDimension]] = None , **_a : str , ):
UpperCamelCase__ = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}""" )
UpperCamelCase__ = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCamelCase__ = int(shortest_edge / crop_pct )
UpperCamelCase__ = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCamelCase__ = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def A_ ( self : str , _a : np.ndarray , _a : Union[int, float] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Tuple , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def A_ ( self : Dict , _a : np.ndarray , _a : Union[float, List[float]] , _a : Union[float, List[float]] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Union[str, Any] , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def A_ ( self : Tuple , _a : ImageInput , _a : bool = None , _a : Dict[str, int] = None , _a : float = None , _a : PILImageResampling = None , _a : bool = None , _a : float = None , _a : bool = None , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[str, TensorType]] = None , _a : ChannelDimension = ChannelDimension.FIRST , **_a : int , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(_A , default_to_square=_A )
UpperCamelCase__ = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCamelCase__ = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 240 |
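The `crop_pct` branch above reproduces a timm evaluation trick: for target sizes below 384, first rescale so the short side is `size / crop_pct`, then center-crop the target square. A plain-PIL sketch of that arithmetic, assuming the default `crop_pct = 224 / 256`:

```python
from PIL import Image

def resize_with_crop_pct(image, shortest_edge, crop_pct):
    # Resize so the shorter side becomes shortest_edge / crop_pct, then
    # center-crop a shortest_edge x shortest_edge square.
    scale_size = int(shortest_edge / crop_pct)
    w, h = image.size
    if w < h:
        new_w, new_h = scale_size, int(h * scale_size / w)
    else:
        new_w, new_h = int(w * scale_size / h), scale_size
    image = image.resize((new_w, new_h), Image.BILINEAR)
    left = (new_w - shortest_edge) // 2
    top = (new_h - shortest_edge) // 2
    return image.crop((left, top, left + shortest_edge, top + shortest_edge))

img = Image.new("RGB", (640, 480))
print(resize_with_crop_pct(img, 224, 224 / 256).size)  # (224, 224)
```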
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """
    Reflect a beam that hits the ellipse 4x^2 + y^2 = 100 at (point_x,
    point_y) with slope incoming_gradient; return the next impact point
    and the outgoing gradient.
    """
    # gradient of the normal line through (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # s_2 = sin(2 * theta), c_2 = cos(2 * theta), where theta is the angle of
    # the normal; reflecting the incoming slope is a rotation by 2 * theta
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Count how many times the beam reflects inside the cell before it exits
    through the gap -0.01 <= x <= 0.01 at the top of the ellipse.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 74 | 0 |
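A quick sanity check of `next_point` above: both quadratic roots come from substituting the outgoing line into the ellipse equation, so the chosen next point must still satisfy 4x² + y² = 100 up to float error. A short snippet, assuming `next_point` from the sample above is in scope:

```python
# First bounce of the Project Euler 144 beam: it enters at (0.0, 10.1)
# and first strikes the ellipse at (1.4, -9.6).
x, y, gradient = next_point(1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4))
print(abs(4 * x * x + y * y - 100) < 1e-6)  # True: still on the ellipse
```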
'''simple docstring'''
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    # classic sieve on the first segment [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range one segment of length sqrt(n) at a time
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # first multiple of each >= low
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
| 195 |
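A cheap cross-check of the `sieve` above against plain trial division, small enough to drop into a test; it assumes the repaired `sieve` function is in scope:

```python
def naive_primes(n: int) -> list[int]:
    # O(n * sqrt(n)) trial division, used only as a reference.
    return [p for p in range(2, n + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]

assert sieve(1_000) == naive_primes(1_000)
print(naive_primes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```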
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : int = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : int = min_resolution
__SCREAMING_SNAKE_CASE : Optional[int] = max_resolution
__SCREAMING_SNAKE_CASE : List[Any] = do_resize
__SCREAMING_SNAKE_CASE : Union[str, Any] = size
__SCREAMING_SNAKE_CASE : str = do_center_crop
__SCREAMING_SNAKE_CASE : Any = crop_size
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
    lowerCAmelCase_ = MobileNetV1ImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
        __SCREAMING_SNAKE_CASE : str = MobileNetV1ImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''crop_size''' ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 74 | 0 |
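The shape assertions above boil down to a single processor call. A minimal sketch using the same 20-pixel shortest edge and 18×18 crop as the tester defaults, assuming `MobileNetV1ImageProcessor` is importable (transformers with vision extras installed):

```python
from PIL import Image
from transformers import MobileNetV1ImageProcessor

image_processor = MobileNetV1ImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
image = Image.new("RGB", (40, 30))
pixel_values = image_processor(image, return_tensors="np")["pixel_values"]
print(pixel_values.shape)  # (1, 3, 18, 18): batch, channels, height, width
```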
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """
    For each index i, z_result[i] is the length of the longest substring
    starting at i that is also a prefix of input_str.
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if the match at position i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of pattern in input_str via the Z-array."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 453 |
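Typical use of the functions above: Z-values of `pattern + text` that reach `len(pattern)` mark match positions. Note the concatenation carries no separator character, so a pattern that can overlap the pattern/text boundary may in principle over-count; for ordinary inputs it behaves as expected. Assuming the repaired functions above are in scope:

```python
print(z_function("abaabacaba"))        # [0, 0, 1, 3, 0, 1, 0, 3, 0, 1]
print(find_pattern("aba", "abacaba"))  # 2: matches at text offsets 0 and 4
```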
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
from __future__ import annotations
def all_unique(values: list) -> bool:
    # A sequence has no duplicates iff deduplicating preserves its length.
    return len(set(values)) == len(values)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 | 0 |
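The `_LazyModule` dance above defers the heavy torch/TF imports until a symbol is actually touched. The same effect can be had with a PEP 562 module-level `__getattr__`; a minimal sketch for a package `__init__.py`, with hypothetical submodule and class names:

```python
# mypkg/__init__.py -- lazy imports via PEP 562, the idea _LazyModule wraps.
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # Import the submodule only when one of its symbols is first accessed.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```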
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (lowerCAmelCase__ , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Tuple:
super().setUp()
_SCREAMING_SNAKE_CASE = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> int:
_SCREAMING_SNAKE_CASE = '''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = '''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=_A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=_A )
_SCREAMING_SNAKE_CASE = '''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(_A ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=_A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Tuple:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(_A , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_with_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_with_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_with_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_with_spe_char_r)
                tokens_with_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_with_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_with_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_with_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 591 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
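# Illustrative usage (the model names come from the mapping in convert_xclip_checkpoint below):
#   config = get_xclip_config("xclip-base-patch32", num_frames=8)  # patch_size 32 is parsed from the name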
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
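# Traced example: "transformer.resblocks.0.ln_1.weight" first has "ln_1" rewritten to
# "layer_norm1" and then the "transformer.resblocks" prefix rewritten, yielding
# "text_model.encoder.layers.0.layer_norm1.weight".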
def convert_state_dict(orig_state_dict, config):
    # The destination names below mirror the HF X-CLIP layout also produced by rename_key above.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
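# Illustrative key split: "visual.transformer.resblocks.0.attn.in_proj_weight" lands in the
# first branch above (key_split[3] == "0") and is chunked into the q/k/v projection weights
# of "vision_model.encoder.layers.0.self_attn".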
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
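# The dataset repo hosts the same clip pre-extracted at 8, 16 and 32 frames; e.g.
# prepare_video(16) returns a list of 16 numpy frames loaded from "eating_spaghetti.npy".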
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 193 |
from pathlib import Path

import fire


def minify(src_dir, dest_dir, n):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
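# Example CLI usage via fire (paths and the line count are illustrative):
#   python minify.py /path/to/src_dir /path/to/dest_dir 128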
| 74 | 0 |
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerators of the continued-fraction convergents of e, whose coefficients
    # are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third coefficient is 2k/3, the rest are 1.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
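# Quick self-check: the 10th convergent of e's continued fraction is 1457/536,
# so solution(10) must equal sum_digits(1457) == 1 + 4 + 5 + 7 == 17.
assert solution(10) == 17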
if __name__ == "__main__":
print(F"""{solution() = }""")
| 369 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids must be a list of integers, and only integer inputs are accepted.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # Constraints that are complete subsets of another are not allowed.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 74 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 229 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task, where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
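# Worked example with the defaults above: input_size=192 and mask_patch_size=32 give a
# 6x6 grid of maskable patches (token_count=36); mask_ratio=0.6 masks ceil(36 * 0.6) = 22
# of them, and the mask is upsampled by scale=32//4=8 to align with the model's patch grid.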
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms and creating a corresponding mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 572 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 74 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 146 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : int = is_training
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
__SCREAMING_SNAKE_CASE : str = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : Any = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = num_labels
__SCREAMING_SNAKE_CASE : List[Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
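    # Each `create_and_check_*` helper above instantiates one DistilBert head from
    # the shared config, runs a forward pass on the generated inputs, and asserts
    # only on output shapes; exact values are checked in the integration test below.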
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        """simple docstring"""
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 74 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir, dest_dir, n):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
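# Example invocation (illustrative paths; `fire` maps positional CLI arguments
# onto minify's parameters):
#
#     python minify.py path/to/src_dir path/to/dest_dir 100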
| 301 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """simple docstring"""

    def __init__(self, lock_file):
        """simple docstring"""
        self.lock_file = lock_file
        return None

    def __str__(self):
        """simple docstring"""
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """simple docstring"""

    def __init__(self, lock):
        """simple docstring"""
        self.lock = lock
        return None

    def __enter__(self):
        """simple docstring"""
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        """simple docstring"""
        return self._lock_file

    @property
    def timeout(self):
        """simple docstring"""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        """simple docstring"""
        self._timeout = float(value)
        return None

    def _acquire(self):
        """simple docstring"""
        raise NotImplementedError()

    def _release(self):
        """simple docstring"""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """simple docstring"""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """simple docstring"""
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """simple docstring"""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None
    def __enter__(self):
        """simple docstring"""
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """simple docstring"""
        self.release()
        return None

    def __del__(self):
        """simple docstring"""
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """simple docstring"""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """simple docstring"""

    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        """simple docstring"""
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
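if __name__ == "__main__":
    # Minimal usage sketch (illustrative; "demo.lock" is a hypothetical path).
    # FileLock resolves to the platform-appropriate class selected above and is
    # reentrant: nested acquires only bump the counter, and the OS-level lock is
    # released once the counter drops back to zero.
    with FileLock("demo.lock", timeout=5):
        print("lock acquired")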
| 74 | 0 |
import os
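# This appears to be a Project Euler-style solution: sum the integers listed one
# per line in the adjacent "num.txt" and print the first ten digits of the total.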
def solution():
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 240 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """simple docstring"""

    def __init__(self, **kwargs):
        """simple docstring"""
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """simple docstring"""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """simple docstring"""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
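# Usage sketch (illustrative; this module uses package-relative imports, so the
# snippet assumes it is exercised through the installed package):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     encoding["nodes"]   # [['Hello world']]
#     encoding["xpaths"]  # [['/html/body/p']]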
| 74 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
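# Usage sketch (illustrative): the `attribute_map` above routes generic config
# attribute names onto DETA-specific ones.
#
#     configuration = DetaConfig()
#     configuration.hidden_size  # resolves to configuration.d_model == 256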
| 195 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
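# This script converts timm LeViT checkpoints (128S/128/192/256/384) into
# transformers LevitForImageClassificationWithTeacher checkpoints and, optionally,
# saves them together with a LevitImageProcessor to a dump folder / the Hub.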
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """simple docstring"""
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
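# Note on convert_weight_and_push(): the key-by-index copy assumes the timm and
# transformers state dicts enumerate parameters in the same order; the printed key
# counts and the logits `allclose` check above are what guard that assumption.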
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 74 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
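# rename_key() only covers one-to-one renames; fused attention projections
# ("attn.in_proj") must be split into separate q/k/v tensors and are therefore
# handled in convert_state_dict() below instead.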
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                # NOTE: the target keys below are assumed to match the layer naming
                # produced by rename_key() above; the flattened source had lost the
                # left-hand sides of these assignments.
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
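# The splits above follow the usual fused-projection layout of `attn.in_proj_*`
# tensors: the first `dim` rows hold the query projection, the next `dim` the key
# projection, and the last `dim` the value projection, for weights and biases alike.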
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 453 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
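# Standard transformers lazy-import layout: `_import_structure` maps each submodule
# to its public names; under TYPE_CHECKING the names are imported eagerly for static
# analysis, while at runtime the module object is swapped for a _LazyModule below.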
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
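# Example invocation (illustrative values; the script filename and token are
# assumptions, not taken from the source):
#
#     python extract_warnings.py --workflow_run_id 12345 --output_dir ./artifacts \
#         --token <github_token> --targets DeprecationWarning,UserWarning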
| 307 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """simple docstring"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' '):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '\n'.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """simple docstring"""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('.zip') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        """simple docstring"""
        return values.split(',')

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 74 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """simple docstring"""
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    """simple docstring"""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # NOTE: the target keys below are assumed to follow the ViT-style layer
            # naming used by ASTModel; the flattened source had lost the left-hand
            # sides of these assignments.
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    """simple docstring"""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
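# remove_keys() drops the original classification head ("module.v.head*") so that
# the remaining weights can be renamed/split and loaded into a freshly initialized
# ASTForAudioClassification in the conversion function below.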
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
_SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
_SCREAMING_SNAKE_CASE = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_SCREAMING_SNAKE_CASE = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
_SCREAMING_SNAKE_CASE = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
_SCREAMING_SNAKE_CASE = 10_24 if '''speech-commands''' not in model_name else 1_28
_SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
_SCREAMING_SNAKE_CASE = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
_SCREAMING_SNAKE_CASE = dataset[0]['''audio''']['''array''']
else:
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = torchaudio.load(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = waveform.squeeze().numpy()
_SCREAMING_SNAKE_CASE = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=1_60_00 , return_tensors="""pt""" )
# forward pass
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_SCREAMING_SNAKE_CASE = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_SCREAMING_SNAKE_CASE = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_SCREAMING_SNAKE_CASE = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_SCREAMING_SNAKE_CASE = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
_SCREAMING_SNAKE_CASE = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
raise ValueError("""Logits don\'t match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 591 |
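# Typical command-line use of the conversion script above (the script filename
# here is illustrative; the flags mirror its argparse definition):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub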
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin ):
    """simple docstring"""
@register_to_config
    def __init__( self , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , out_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , activation_fn : str = "geglu" , norm_elementwise_affine : bool = True , double_self_attention : bool = True , ):
        """simple docstring"""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict : bool = True , ):
        """simple docstring"""
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , channel , num_frames )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
| 74 | 0 |
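# Hypothetical smoke test for the temporal transformer defined above (in
# released diffusers this class is TransformerTemporalModel); the input uses
# the (batch * frames, channels, height, width) layout the forward expects,
# here with batch=2 and num_frames=4.
model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=256)
out = model(torch.randn(2 * 4, 256, 8, 8), num_frames=4)
print(out.sample.shape)  # torch.Size([8, 256, 8, 8])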
def UpperCAmelCase_ ( matrix ) -> int:
    """simple docstring"""
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 193 |
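# Quick check of the rank routine above: the second row is a multiple of the
# first, so elimination zeroes it out and the rank drops from 2 to 1.
print(UpperCAmelCase_([[1.0, 2.0], [2.0, 4.0]]))  # 1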
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def _should_continue( line , indent ):
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , line ) is not None
def find_code_in_diffusers( object_name ):
    """simple docstring"""
    parts = object_name.split('''.''' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , F'''{module}.py''' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
    with open(os.path.join(DIFFUSERS_PATH , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''''''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent( code ):
    """simple docstring"""
    lines = code.split('''\n''' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
    return ""
def blackify( code ):
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F'''class Bla:\n{code}'''
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result, _ = style_docstrings_in_code(result )
    return result[len('''class Bla:\n''' ) :] if has_indent else result
def is_copy_consistent( filename , overwrite=False ):
    """simple docstring"""
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(F'''^{indent}# End copy''' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code = ''''''.join(lines[start_index:line_index] )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '''\n'''.join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja, objb, option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(F'''Detected changes, rewriting {filename}.''' )
        with open(filename , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
            f.writelines(lines )
    return diffs
def check_copies( overwrite = False ):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , '''**/*.py''' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '''\n'''.join(diffs )
        raise Exception(
            '''Found the following copy inconsistencies:\n'''
            + diff
            + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 74 | 0 |
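# Typical invocations of the consistency checker above, mirroring its argparse:
#   python utils/check_copies.py                      # report "Copied from" drift
#   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place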
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args : Tuple , **kwargs : List[str] )-> Optional[int]:
            pass
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def lowerCAmelCase ( self : Union[str, Any] , model : List[Any] , tokenizer : List[str] , processor : Any )-> List[str]:
snake_case = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
snake_case = [
{
'''image''': Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
    def lowerCAmelCase ( self : Any , vqa_pipeline : Dict , examples : Optional[Any] )-> str:
snake_case = vqa_pipeline(_A , top_k=1 )
self.assertEqual(
_A , [
[{"""score""": ANY(_A ), """answer""": ANY(_A )}],
[{"""score""": ANY(_A ), """answer""": ANY(_A )}],
] , )
@require_torch
def lowerCAmelCase ( self : Dict )-> List[str]:
snake_case = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
snake_case = '''How many cats are there?'''
snake_case = vqa_pipeline(image=_A , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
_A , [{"""score""": ANY(_A ), """answer""": ANY(_A )}, {"""score""": ANY(_A ), """answer""": ANY(_A )}] )
snake_case = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
_A , [{"""score""": ANY(_A ), """answer""": ANY(_A )}, {"""score""": ANY(_A ), """answer""": ANY(_A )}] )
@slow
@require_torch
def lowerCAmelCase ( self : List[str] )-> Tuple:
snake_case = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
snake_case = '''How many cats are there?'''
snake_case = vqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
snake_case = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
snake_case = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowerCAmelCase ( self : str )-> Dict:
pass
| 369 |
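# Standalone sketch of the pipeline exercised by the tests above; the model id
# and image path come from the slow test, and the exact scores will vary.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # e.g. [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]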
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloat16 , )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : int = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A )
__SCREAMING_SNAKE_CASE : Tuple = replicate(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = shard(_A )
__SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() )
__SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1]
__SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2'''
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' )
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
            _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloat16 , )
__SCREAMING_SNAKE_CASE : List[str] = scheduler_params
__SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : List[Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A )
__SCREAMING_SNAKE_CASE : List[str] = shard(_A )
__SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1]
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 74 | 0 |
'''simple docstring'''
__lowerCAmelCase = """Input must be a string of 8 numbers plus letter"""
__lowerCAmelCase = """TRWAGMYFPDXBNJZSQVHLCKE"""
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
    if not isinstance(__a , str ):
_a : List[Any] = f"""Expected string as input, found {type(__a ).__name__}"""
raise TypeError(__a )
_a : List[str] = spanish_id.replace('-' , '' ).upper()
if len(__a ) != 9:
raise ValueError(__a )
try:
_a : Optional[Any] = int(spanish_id_clean[0:8] )
_a : Tuple = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__a ) from ex
if letter.isdigit():
raise ValueError(__a )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229 |
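# Worked example for the validator above: LOOKUP_LETTERS holds 23 letters and
# 12345678 % 23 == 14, whose letter is "Z", so only "12345678Z" checks out.
print(UpperCAmelCase_("12345678Z"))  # True
print(UpperCAmelCase_("12345678T"))  # False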
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""LayoutLMv2FeatureExtractor"""]
lowercase_ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 | 0 |
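# The _LazyModule wiring above defers the heavy submodule imports until first
# attribute access; assuming this is the transformers layoutlmv2 package init,
# the package behaves like a regular module from the outside:
from transformers.models.layoutlmv2 import LayoutLMv2Config  # loaded on demand

config = LayoutLMv2Config()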
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCamelCase :
def __init__( self , snake_case__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : int = 13
_SCREAMING_SNAKE_CASE : Any = 7
_SCREAMING_SNAKE_CASE : List[str] = 30
_SCREAMING_SNAKE_CASE : List[Any] = self.seq_length + self.mem_len
_SCREAMING_SNAKE_CASE : Union[str, Any] = 15
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = 99
_SCREAMING_SNAKE_CASE : Optional[Any] = [10, 50, 80]
_SCREAMING_SNAKE_CASE : int = 32
_SCREAMING_SNAKE_CASE : int = 32
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Union[str, Any] = 8
_SCREAMING_SNAKE_CASE : Union[str, Any] = 128
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : Optional[Any] = 2
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Dict = 1
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : int = 3
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
_SCREAMING_SNAKE_CASE : Optional[Any] = 0.01
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def __SCREAMING_SNAKE_CASE ( self , config , input_ids_a , input_ids_b , lm_labels ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = TFTransfoXLModel(_A )
_SCREAMING_SNAKE_CASE : Dict = model(_A ).to_tuple()
_SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids_a, '''mems''': mems_a}
_SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def __SCREAMING_SNAKE_CASE ( self , config , input_ids_a , input_ids_b , lm_labels ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = TFTransfoXLLMHeadModel(_A )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple()
_SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
_SCREAMING_SNAKE_CASE : Tuple = model(_A ).to_tuple()
_SCREAMING_SNAKE_CASE : Dict = model([input_ids_a, mems_a] ).to_tuple()
_SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
_SCREAMING_SNAKE_CASE : List[str] = model(_A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def __SCREAMING_SNAKE_CASE ( self , config , input_ids_a , input_ids_b , lm_labels ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = TFTransfoXLForSequenceClassification(_A )
_SCREAMING_SNAKE_CASE : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_b, lm_labels) = config_and_inputs
_SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
A__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
A__ = () if is_tf_available() else ()
A__ = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
A__ = False
A__ = False
A__ = False
A__ = False
    def __SCREAMING_SNAKE_CASE ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFTransfoXLModelTester(self )
_SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=_A , d_embed=37 )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.model_tester.set_seed()
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_A )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.model_tester.set_seed()
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_A )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_A )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Optional[int] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Dict = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_SCREAMING_SNAKE_CASE : List[Any] = model.get_output_embeddings()
assert isinstance(_A , tf.keras.layers.Layer )
_SCREAMING_SNAKE_CASE : Any = model.get_bias()
assert name is None
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = model.get_output_embeddings()
assert x is None
_SCREAMING_SNAKE_CASE : Dict = model.get_bias()
assert name is None
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Optional[Any] = TFTransfoXLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip(reason="This model doesn\'t play well with fit() due to not returning a single loss." )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
        _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_SCREAMING_SNAKE_CASE : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_SCREAMING_SNAKE_CASE : Optional[int] = model.generate(_A , max_length=200 , do_sample=_A )
self.assertListEqual(output_ids[0].numpy().tolist() , _A )
| 572 |
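# Condensed sketch of the generation flow in the slow test above; downloading
# the transfo-xl-wt103 weights is heavy, so treat this as illustrative only.
import tensorflow as tf
from transformers import TFTransfoXLLMHeadModel

model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009]], dtype=tf.int32)
output_ids = model.generate(input_ids, max_length=20, do_sample=False)
print(output_ids[0].numpy().tolist())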
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__SCREAMING_SNAKE_CASE : Dict = {}
for i, token in enumerate(_A ):
__SCREAMING_SNAKE_CASE : List[str] = i
__SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
__SCREAMING_SNAKE_CASE : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有''']
__SCREAMING_SNAKE_CASE : int = ''''''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE : List[Any] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 74 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""image""": Image()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    image_column: str = """image"""
    label_column: str = """labels"""
    def align_with_features(self , features ) -> "ImageClassification":
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(F"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template
    @property
    def column_mapping(self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 146 |
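# Sketch of how the task template above is consumed, assuming the classic
# datasets.tasks API (prepare_for_task was deprecated in later releases):
from datasets import load_dataset
from datasets.tasks import ImageClassification

ds = load_dataset("beans", split="train")
ds = ds.prepare_for_task(ImageClassification(image_column="image", label_column="labels"))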
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
    def __init__( self : Tuple , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 74 | 0 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
SCREAMING_SNAKE_CASE__ = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(3_2, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
SCREAMING_SNAKE_CASE__ = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary'
)
SCREAMING_SNAKE_CASE__ = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(6_4, 6_4)
)
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.img_to_array(test_image)
SCREAMING_SNAKE_CASE__ = np.expand_dims(test_image, axis=0)
SCREAMING_SNAKE_CASE__ = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
SCREAMING_SNAKE_CASE__ = 'Normal'
if result[0][0] == 1:
SCREAMING_SNAKE_CASE__ = 'Abnormality detected'
| 301 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase_ = 1_00_00
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase_ = ParquetConfig
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE : Tuple = data_files
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__SCREAMING_SNAKE_CASE : int = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_A ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
break
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
return splits
def UpperCAmelCase__ ( self : str , _A : pa.Table ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
raise
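if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the builder): `load_dataset("parquet", ...)`
    # dispatches to the builder above, and the `columns` config field becomes a
    # projection pushed down to pyarrow via ParquetFile.iter_batches. The path
    # and column names below are placeholders.
    from datasets import load_dataset

    ds = load_dataset(
        "parquet",
        data_files={"train": "data/train.parquet"},  # placeholder path
        columns=["id", "text"],  # placeholder projection
    )["train"]
    print(ds.features)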
| 74 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = '''roc_bert'''
def __init__( self : Tuple , _a : List[Any]=30_522 , _a : Optional[int]=768 , _a : List[str]=12 , _a : str=12 , _a : Tuple=3_072 , _a : int="gelu" , _a : Optional[Any]=0.1 , _a : Any=0.1 , _a : Optional[int]=512 , _a : Optional[int]=2 , _a : List[str]=0.02 , _a : Optional[Any]=1E-12 , _a : Tuple=True , _a : Union[str, Any]=0 , _a : Any="absolute" , _a : str=None , _a : int=True , _a : str=True , _a : List[Any]=768 , _a : Any=910 , _a : str=512 , _a : Union[str, Any]=24_858 , _a : Tuple=True , **_a : List[str] , ):
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = enable_pronunciation
UpperCamelCase__ = enable_shape
UpperCamelCase__ = pronunciation_embed_dim
UpperCamelCase__ = pronunciation_vocab_size
UpperCamelCase__ = shape_embed_dim
UpperCamelCase__ = shape_vocab_size
UpperCamelCase__ = concat_input
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = classifier_dropout
super().__init__(pad_token_id=_A , **_A )
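# Usage sketch (illustrative, referencing the upstream RoCBertConfig that this
# class mirrors; the override and path are example values):
#
#   from transformers import RoCBertConfig
#   config = RoCBertConfig(num_hidden_layers=4)
#   config.save_pretrained("./roc-bert-tiny")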
| 240 |
from math import isclose, sqrt
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x
__SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4
__SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
__SCREAMING_SNAKE_CASE : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__SCREAMING_SNAKE_CASE : int = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus
__SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a__ ( snake_case = 1.4 , snake_case = -9.6 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : float = first_x_coord
__SCREAMING_SNAKE_CASE : float = first_y_coord
__SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
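    # Invariant check (illustrative, not part of the original solution): every
    # point returned by next_point must stay on the ellipse 4x^2 + y^2 = 100,
    # within floating-point tolerance.
    x, y, gradient = 1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4)
    for _ in range(5):
        x, y, gradient = next_point(x, y, gradient)
        assert isclose(4 * x * x + y * y, 100, abs_tol=1e-6)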
| 74 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( lowerCAmelCase__ ):
lowercase__ = ["""image_processor""", """tokenizer"""]
lowercase__ = """CLIPImageProcessor"""
lowercase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Dict ,lowerCamelCase__ : List[str]=None ,lowerCamelCase__ : List[Any]=None ,**lowerCamelCase__ : Any ):
'''simple docstring'''
_UpperCamelCase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,_A ,)
_UpperCamelCase : str = kwargs.pop('feature_extractor' )
_UpperCamelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_A ,_A )
def __call__( self : List[str] ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Tuple=None ,**lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_UpperCamelCase : Optional[int] = self.tokenizer(_A ,return_tensors=_A ,**_A )
if images is not None:
_UpperCamelCase : str = self.image_processor(_A ,return_tensors=_A ,**_A )
if text is not None and images is not None:
_UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) ,tensor_type=_A )
def UpperCamelCase_ ( self : List[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A ,**_A )
def UpperCamelCase_ ( self : Optional[int] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_A ,**_A )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = self.tokenizer.model_input_names
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,_A ,)
return self.image_processor_class
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,_A ,)
return self.image_processor
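# Usage sketch (illustrative, via the upstream CLIPProcessor that this class
# mirrors): one call tokenizes the text, preprocesses the image, and merges
# both into a single batch. The checkpoint name is the public CLIP base model
# and the image is a blank placeholder.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']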
| 195 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : int = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : int = min_resolution
__SCREAMING_SNAKE_CASE : Optional[int] = max_resolution
__SCREAMING_SNAKE_CASE : List[Any] = do_resize
__SCREAMING_SNAKE_CASE : Union[str, Any] = size
__SCREAMING_SNAKE_CASE : str = do_center_crop
__SCREAMING_SNAKE_CASE : Any = crop_size
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''crop_size''' ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
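# Standalone usage sketch (illustrative, assuming the upstream MobileNetV1
# image processor API; the settings mirror the defaults exercised above):
# a 30x30 image is resized to shortest edge 20, then center-cropped to 18x18.
if __name__ == "__main__":
    from transformers import MobileNetV1ImageProcessor

    image_processor = MobileNetV1ImageProcessor(
        do_resize=True, size={"shortest_edge": 20},
        do_center_crop=True, crop_size={"height": 18, "width": 18},
    )
    pixel_values = image_processor(
        np.zeros((3, 30, 30), dtype=np.uint8), return_tensors="pt"
    ).pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])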
| 74 | 0 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_UpperCamelCase = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase__=2 , lowercase__=3 , lowercase__=1_6 , lowercase__ = 1_0 , lowercase__ = 2 ) -> Union[str, Any]:
def get_dataset(lowercase__ ):
lowerCAmelCase__ : Union[str, Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
lowerCAmelCase__ : str = get_dataset(lowercase__ )
lowerCAmelCase__ : Dict = get_dataset(lowercase__ )
lowerCAmelCase__ : Optional[int] = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 )
lowerCAmelCase__ : Optional[Any] = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ) -> Any:
lowerCAmelCase__ : Tuple = []
for epoch in range(lowercase__ ):
# Train quickly
model.train()
for batch in dataloader:
lowerCAmelCase__ : Optional[Any] = batch
lowerCAmelCase__ : List[Any] = model(lowercase__ )
lowerCAmelCase__ : str = torch.nn.functional.mse_loss(lowercase__ , lowercase__ )
accelerator.backward(lowercase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.randn(1 ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.randn(1 ) )
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
return x * self.a + self.b
class __a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Optional[Any] = DummyModel()
lowerCAmelCase__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ : List[str] = dummy_dataloaders()
lowerCAmelCase__ : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A )
# Train baseline
lowerCAmelCase__ : Optional[Any] = Accelerator(project_config=_A )
lowerCAmelCase__ : List[Any] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Union[str, Any] = DummyModel()
lowerCAmelCase__ : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ : Optional[int] = dummy_dataloaders()
# Train baseline
lowerCAmelCase__ : Optional[int] = Accelerator()
lowerCAmelCase__ : Optional[Any] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
lowerCAmelCase__ : List[Any] = os.path.join(_A , "initial" )
accelerator.save_state(_A )
(lowerCAmelCase__) : Any = model.a.item(), model.b.item()
lowerCAmelCase__ : Dict = optimizer.state_dict()
lowerCAmelCase__ : Tuple = train(3 , _A , _A , _A , _A )
(lowerCAmelCase__) : Union[str, Any] = model.a.item(), model.b.item()
lowerCAmelCase__ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCAmelCase__ : Optional[int] = DummyModel()
lowerCAmelCase__ : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ : Dict = dummy_dataloaders()
lowerCAmelCase__ : List[str] = Accelerator()
lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(_A )
(lowerCAmelCase__) : Any = model.a.item(), model.b.item()
lowerCAmelCase__ : Dict = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
lowerCAmelCase__ : Tuple = train(2 , _A , _A , _A , _A )
# Save everything
lowerCAmelCase__ : Dict = os.path.join(_A , "checkpoint" )
accelerator.save_state(_A )
# Load everything back in and make sure all states work
accelerator.load_state(_A )
test_rands += train(1 , _A , _A , _A , _A )
(lowerCAmelCase__) : Tuple = model.a.item(), model.b.item()
lowerCAmelCase__ : str = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : List[str] = DummyModel()
lowerCAmelCase__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ : str = dummy_dataloaders()
lowerCAmelCase__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
lowerCAmelCase__ : Any = Accelerator(project_dir=_A , project_config=_A )
lowerCAmelCase__ : int = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
(lowerCAmelCase__) : Optional[int] = model.a.item(), model.b.item()
lowerCAmelCase__ : Tuple = optimizer.state_dict()
lowerCAmelCase__ : Any = train(3 , _A , _A , _A , _A )
(lowerCAmelCase__) : Union[str, Any] = model.a.item(), model.b.item()
lowerCAmelCase__ : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCAmelCase__ : Optional[int] = DummyModel()
lowerCAmelCase__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ : Optional[Any] = dummy_dataloaders()
lowerCAmelCase__ : Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A )
lowerCAmelCase__ : List[str] = Accelerator(project_dir=_A , project_config=_A )
lowerCAmelCase__ : Tuple = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(os.path.join(_A , "checkpoints" , "checkpoint_0" ) )
(lowerCAmelCase__) : Optional[Any] = model.a.item(), model.b.item()
lowerCAmelCase__ : Optional[int] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
lowerCAmelCase__ : Optional[int] = train(2 , _A , _A , _A , _A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , _A , _A , _A , _A )
(lowerCAmelCase__) : Optional[int] = model.a.item(), model.b.item()
lowerCAmelCase__ : int = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = torch.tensor([1, 2, 3] )
lowerCAmelCase__ : str = torch.tensor([2, 3, 4] )
lowerCAmelCase__ : List[str] = DummyModel()
lowerCAmelCase__ : int = torch.optim.Adam(net.parameters() )
lowerCAmelCase__ : int = Accelerator()
with self.assertRaises(_A ) as ve:
accelerator.register_for_checkpointing(_A , _A , _A , _A )
lowerCAmelCase__ : List[str] = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : List[str] = DummyModel()
lowerCAmelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ : Tuple = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 )
lowerCAmelCase__ : Tuple = dummy_dataloaders()
lowerCAmelCase__ : int = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
lowerCAmelCase__ : List[Any] = Accelerator(project_dir=_A , project_config=_A )
lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(
_A , _A , _A , _A , _A )
# Save initial
accelerator.save_state()
lowerCAmelCase__ : Optional[Any] = scheduler.state_dict()
train(3 , _A , _A , _A , _A , _A )
self.assertNotEqual(_A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(_A , scheduler.state_dict() )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Optional[int] = DummyModel()
lowerCAmelCase__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 )
# Train baseline
lowerCAmelCase__ : Optional[Any] = Accelerator(project_dir=_A , project_config=_A )
lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(_A )
            # Save 11 states; with total_limit=2 only the most recent two survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_A , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
_UpperCamelCase = """/tmp/accelerate/state_checkpointing"""
_UpperCamelCase = DummyModel()
_UpperCamelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_UpperCamelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_UpperCamelCase , _UpperCamelCase = dummy_dataloaders()
_UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_UpperCamelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_UpperCamelCase , _UpperCamelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_UpperCamelCase = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
_UpperCamelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
_UpperCamelCase = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
_UpperCamelCase = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 453 |
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )]
# initialize interval's left pointer and right pointer
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0
for i in range(1 , len(snake_case ) ):
# case when current index is inside the interval
if i <= right_pointer:
__SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
__SCREAMING_SNAKE_CASE : Dict = min_edge
while go_next(snake_case , snake_case , snake_case ):
z_result[i] += 1
        # if the new index's result extends the interval further to the right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1
return z_result
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(snake_case ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
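    # Worked example (illustrative): the Z-array of "abacaba" and the number
    # of occurrences of the pattern "aba" inside it.
    print(z_function("abacaba"))           # [0, 0, 1, 0, 3, 0, 1]
    print(find_pattern("aba", "abacaba"))  # 2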
| 74 | 0 |
from __future__ import annotations
from statistics import mean
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = [0] * no_of_processes
_A : List[str] = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(snake_case_ ):
_A : Dict = burst_time[i]
_A : list[int] = []
_A : List[Any] = 0
_A : Optional[Any] = 0
    # While processes remain incomplete:
    # every process whose arrival time has passed and which still has
    # remaining execution time is put into ready_process, and the
    # ready process with the shortest remaining time (target_process)
    # is executed.
while completed != no_of_processes:
_A : Dict = []
_A : Optional[Any] = -1
for i in range(snake_case_ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(snake_case_ )
if len(snake_case_ ) > 0:
_A : Union[str, Any] = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_A : Union[str, Any] = i
total_time += burst_time[target_process]
completed += 1
_A : Union[str, Any] = 0
_A : Union[str, Any] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Any = [0] * no_of_processes
for i in range(snake_case_ ):
_A : Union[str, Any] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
_snake_case = 4
_snake_case = [2, 5, 3, 7]
_snake_case = [0, 0, 0, 0]
_snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_snake_case = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 307 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a (lowerCAmelCase__):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'char'
SCREAMING_SNAKE_CASE = 'bpe'
SCREAMING_SNAKE_CASE = 'wp'
UpperCamelCase__ : Union[str, Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a (lowerCAmelCase__):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['image_processor', 'char_tokenizer']
SCREAMING_SNAKE_CASE = 'ViTImageProcessor'
SCREAMING_SNAKE_CASE = 'MgpstrTokenizer'
def __init__( self , A__=None , A__=None , **A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
_SCREAMING_SNAKE_CASE = tokenizer
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""gpt2""" )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(_A , _A )
def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Any:
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_SCREAMING_SNAKE_CASE = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None:
_SCREAMING_SNAKE_CASE = self.char_tokenizer(_A , return_tensors=_A , **_A )
if text is None:
return inputs
elif images is None:
return encodings
else:
_SCREAMING_SNAKE_CASE = encodings['''input_ids''']
return inputs
def UpperCamelCase ( self , A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = sequences
_SCREAMING_SNAKE_CASE = char_preds.size(0 )
_SCREAMING_SNAKE_CASE = self._decode_helper(_A , """char""" )
_SCREAMING_SNAKE_CASE = self._decode_helper(_A , """bpe""" )
_SCREAMING_SNAKE_CASE = self._decode_helper(_A , """wp""" )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for i in range(_A ):
_SCREAMING_SNAKE_CASE = [char_scores[i], bpe_scores[i], wp_scores[i]]
_SCREAMING_SNAKE_CASE = [char_strs[i], bpe_strs[i], wp_strs[i]]
_SCREAMING_SNAKE_CASE = scores.index(max(_A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = final_strs
_SCREAMING_SNAKE_CASE = final_scores
_SCREAMING_SNAKE_CASE = char_strs
_SCREAMING_SNAKE_CASE = bpe_strs
_SCREAMING_SNAKE_CASE = wp_strs
return out
def UpperCamelCase ( self , A__ , A__ ) -> Any:
if format == DecodeType.CHARACTER:
_SCREAMING_SNAKE_CASE = self.char_decode
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = '''[s]'''
elif format == DecodeType.BPE:
_SCREAMING_SNAKE_CASE = self.bpe_decode
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = '''#'''
elif format == DecodeType.WORDPIECE:
_SCREAMING_SNAKE_CASE = self.wp_decode
_SCREAMING_SNAKE_CASE = 1_02
_SCREAMING_SNAKE_CASE = '''[SEP]'''
else:
raise ValueError(F"Format {format} is not supported." )
_SCREAMING_SNAKE_CASE = [], []
_SCREAMING_SNAKE_CASE = pred_logits.size(0 )
_SCREAMING_SNAKE_CASE = pred_logits.size(1 )
_SCREAMING_SNAKE_CASE = pred_logits.topk(1 , dim=-1 , largest=_A , sorted=_A )
_SCREAMING_SNAKE_CASE = preds_index.view(-1 , _A )[:, 1:]
_SCREAMING_SNAKE_CASE = decoder(_A )
_SCREAMING_SNAKE_CASE = torch.nn.functional.softmax(_A , dim=2 ).max(dim=2 )
_SCREAMING_SNAKE_CASE = preds_max_prob[:, 1:]
for index in range(_A ):
_SCREAMING_SNAKE_CASE = preds_str[index].find(_A )
_SCREAMING_SNAKE_CASE = preds_str[index][:pred_eos]
_SCREAMING_SNAKE_CASE = preds_index[index].cpu().tolist()
_SCREAMING_SNAKE_CASE = pred_index.index(_A ) if eos_token in pred_index else -1
_SCREAMING_SNAKE_CASE = preds_max_prob[index][: pred_eos_index + 1]
_SCREAMING_SNAKE_CASE = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_A )
conf_scores.append(_A )
return dec_strs, conf_scores
def UpperCamelCase ( self , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(_A )]
return decode_strs
def UpperCamelCase ( self , A__ ) -> Tuple:
return self.bpe_tokenizer.batch_decode(_A )
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(_A )]
return decode_strs
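# Usage sketch (illustrative, via the upstream MgpstrProcessor that this class
# mirrors): batch_decode consumes the (char, bpe, wp) logits produced by the
# MGP-STR model and returns the best string per sample. The logit shapes and
# checkpoint name below are assumptions for illustration only.
if __name__ == "__main__":
    import torch
    from transformers import MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    char_logits = torch.randn(1, 27, 38)     # (batch, seq_len, char vocab)
    bpe_logits = torch.randn(1, 27, 50_257)  # GPT-2 BPE vocab
    wp_logits = torch.randn(1, 27, 30_522)   # BERT wordpiece vocab
    outputs = processor.batch_decode((char_logits, bpe_logits, wp_logits))
    print(outputs["generated_text"])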
| 591 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
__SCREAMING_SNAKE_CASE : Optional[Any] = 12
__SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
__SCREAMING_SNAKE_CASE : int = 4_096
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : Optional[int] = 24
__SCREAMING_SNAKE_CASE : Optional[int] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = 336
__SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Any = 768
return config
def a__ ( snake_case ):
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def a__ ( snake_case , snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
__SCREAMING_SNAKE_CASE : List[Any] = key_split[3]
__SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[
:dim
]
__SCREAMING_SNAKE_CASE : Tuple = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:
]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : str = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:]
elif key.startswith('''mit''' ):
__SCREAMING_SNAKE_CASE : List[str] = key_split[2]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : str = val[:dim, :]
__SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Any = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2]
__SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim, :]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : int = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__SCREAMING_SNAKE_CASE : int = val.T
__SCREAMING_SNAKE_CASE : Union[str, Any] = val
return orig_state_dict
def a__ ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : int = np.load(snake_case )
return list(snake_case )
def a__ ( snake_case , snake_case=None , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name]
__SCREAMING_SNAKE_CASE : Any = 8
if "16-frames" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 16
elif "shot" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 32
__SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin'''
gdown.cached_download(snake_case , snake_case , quiet=snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model''']
else:
__SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case )
__SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case )
__SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case )
# Verify outputs
__SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video
__SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 )
print('''Probs:''' , snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case , organization='''nielsr''' )
processor.push_to_hub(snake_case , organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
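# Example invocation (illustrative; assumes this conversion script is saved as
# convert_xclip.py):
#
#   python convert_xclip.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32-hf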
| 74 | 0 |
import datasets
from .evaluate import evaluate
_lowerCAmelCase : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_lowerCAmelCase : Optional[int] = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_lowerCAmelCase : Optional[Any] = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) ,codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] ,reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] ,)
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
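# A hedged quick check (mirrors the docstring example, but with a partial match):
# a prediction that only overlaps the gold answer gets exact_match = 0 while the
# token-level F1 stays non-zero.
#
#     squad_metric = datasets.load_metric("squad")
#     preds = [{"id": "q1", "prediction_text": "in 1976"}]
#     refs = [{"id": "q1", "answers": {"answer_start": [97], "text": ["1976"]}}]
#     squad_metric.compute(predictions=preds, references=refs)
#     # -> {'exact_match': 0.0, 'f1': 66.66...}  (precision 1/2, recall 1/1 on shared tokens)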
| 193 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
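# A hedged self-contained demo of the behavior (file names are illustrative):
#
#     import tempfile
#     with tempfile.TemporaryDirectory() as tmp:
#         src, dest = Path(tmp, "src"), Path(tmp, "dest")
#         src.mkdir()
#         (src / "a.txt").write_text("line1\nline2\nline3\n")
#         minify(str(src), str(dest), n=2)
#         print((dest / "a.txt").read_text())  # -> "line1\nline2"
#
# From the shell, fire maps positional CLI arguments onto the function, e.g.
# `python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_mini 128` (script name assumed).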
| 74 | 0 |
'''simple docstring'''
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
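# A hedged sketch (not the real doc-builder pipeline) of what these patterns are
# for: templated doc code samples contain placeholders like `{model_class}`, and
# swapping in dummy class names keeps the samples parseable while being checked:
#
#     template = "See `{processor_class}` and `{model_class}` for details."
#     for placeholder, fake in black_avoid_patterns.items():
#         template = template.replace(placeholder, fake)
#     template  # -> "See `FakeProcessorClass` and `FakeModelClass` for details."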
| 369 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers; the constraint is initialized only
        # from a nested list of plain Python ints.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another: completing
        # [1, 2, 3, 4] would ambiguously also "fulfill" [1, 2], so such inputs are rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
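if __name__ == "__main__":
    # A hedged usage sketch of the class under test: in transformers' constrained
    # beam search, a DisjunctiveConstraint forces *one of* several token sequences
    # to appear in the output. The model and prompt below are illustrative assumptions.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    # either " rain" or " raining" must occur somewhere in the generation
    flexible_ids = tokenizer([" rain", " raining"], add_special_tokens=False).input_ids
    inputs = tokenizer("The weather forecast says", return_tensors="pt")
    out = model.generate(
        **inputs,
        constraints=[DisjunctiveConstraint(flexible_ids)],
        num_beams=4,
        max_new_tokens=20,
    )
    print(tokenizer.decode(out[0], skip_special_tokens=True))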
| 74 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for token classification.

    Args:
        guid: Unique id for the example.
        words: The words of the sequence.
        labels: (Optional) The labels for each word of the sequence.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """
    A single set of features of data. Property names are the same as the corresponding model inputs.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a list of `InputExample`s into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing a bare space
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
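# A hedged, self-contained illustration of the first-subtoken rule implemented
# above (tokenizer checkpoint and tags are assumptions for the demo):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("bert-base-cased")
#     label_map, pad_token_label_id = {"O": 0, "B-ORG": 1}, -100
#     tokens, label_ids = [], []
#     for word, label in zip(["HuggingFace", "rocks"], ["B-ORG", "O"]):
#         word_tokens = tok.tokenize(word)  # e.g. ["Hu", "##gging", "##F", "##ace"]
#         tokens.extend(word_tokens)
#         label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
#     # only the first piece of each word carries a real label; continuations get -100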
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """PyTorch dataset for token classification, with on-disk feature caching."""

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        # roberta uses an extra separator b/w pairs of sentences
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """TensorFlow dataset for token classification."""

        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                # roberta uses an extra separator b/w pairs of sentences
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
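if __name__ == "__main__":
    # A hedged sanity check of why pad_token_label_id = -100 is safe: positions
    # labeled -100 are skipped by PyTorch's CrossEntropyLoss (its ignore_index).
    import torch
    from torch import nn

    logits = torch.randn(5, 3)                    # 5 token positions, 3 tag classes
    labels = torch.tensor([1, -100, 0, -100, 2])  # subword continuations masked out
    loss = nn.CrossEntropyLoss()(logits, labels)  # averages over the 3 real labels only
    print(loss)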
| 229 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
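# A hedged sketch of what MaskGenerator produces (numbers follow the defaults above):
# 192/32 = 6, so 36 coarse patches, ceil(36 * 0.6) = 22 of them masked; each coarse
# patch is then expanded by scale 32/4 = 8, giving a flat mask of (6*8)**2 = 2304 entries.
#
#     gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#     mask = gen()
#     mask.shape            # torch.Size([2304])
#     mask.float().mean()   # ≈ 22/36 ≈ 0.61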
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
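# A hedged sketch of the objective a single batch optimizes (shapes follow the
# defaults; AutoModelForMaskedImageModeling is the class instantiated above):
#
#     gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#     batch = collate_fn([{"pixel_values": torch.randn(3, 192, 192), "mask": gen()} for _ in range(2)])
#     outputs = model(**batch)   # pixel_values + bool_masked_pos, as returned by collate_fn
#     outputs.loss               # reconstruction loss computed on the masked patches only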
| 74 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
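# After conversion (matching checkpoints are also published on the hub), inference
# is a few lines; a hedged sketch reusing the ADE20K test image loaded above:
#
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     pixel_values = SegformerImageProcessor()(image, return_tensors="pt").pixel_values
#     with torch.no_grad():
#         logits = model(pixel_values).logits   # (batch, 150, height, width)
#     pred_map = logits.argmax(dim=1)           # per-pixel ADE20K class ids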
| 572 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
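# A hedged usage sketch: instantiate the config and inspect the ONNX export
# metadata defined above (values follow the defaults):
#
#     config = Data2VecVisionConfig()
#     (config.image_size, config.patch_size)   # (224, 16)
#     onnx_config = Data2VecVisionOnnxConfig(config)
#     dict(onnx_config.inputs)                 # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
#     onnx_config.atol_for_validation          # 1e-4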
| 74 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
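if __name__ == "__main__":
    # A hedged quick look at the utilities under test (file paths as defined above):
    print(get_test_info.to_json(get_test_to_tester_mapping(BERT_TEST_FILE)))
    # -> {"BertModelTest": "BertModelTester"}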
| 146 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_A )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A )
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 74 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
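

# --- Added usage sketch (not part of the original file) ---
# A minimal, hedged example of how the processor above is typically driven,
# assuming it is instantiated directly; the image path is illustrative only.
#
#   from PIL import Image
#   processor = BlipImageProcessor(size={"height": 384, "width": 384})
#   batch = processor(images=Image.open("example.jpg"), return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected: (1, 3, 384, 384)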
| 301 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the module-level logger, creating it on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    """Helper returned by ``acquire`` so that ``with lock.acquire():`` works."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock(object):
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value, in seconds."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent low-level acquire; implemented by subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent low-level release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
"""Alias for the lock, which should be used by the outside world."""

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 74 | 0 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Returns C(n, k) = n! / (k! * (n - k)!), the number of k-element subsets of n items."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
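

# Added sanity check (illustrative): C(5, 2) counts the 10 two-element
# subsets of a five-element set, matching 5! / (2! * 3!) = 10.
assert combinations(5, 2) == 10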
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'fifty-two card deck is: {combinations(5_2, 5)}\n',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'4 for group projects, there are {combinations(4_0, 4)} ways',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'are {combinations(1_0, 3)} ways that first, second and',
"""third place can be awarded.""",
)
| 240 |
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Turns raw HTML strings into lists of DOM nodes and their xpaths (requires the `bs4` backend)."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
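

# --- Added usage sketch (not part of the original file) ---
# Hedged example of the nodes/xpaths output on a tiny HTML snippet; the exact
# xpath strings depend on bs4's parse tree.
#
#   fe = MarkupLMFeatureExtractor()
#   out = fe("<html><body><p>Hello</p></body></html>")
#   out["nodes"]   # e.g. [["Hello"]]
#   out["xpaths"]  # e.g. [["/html/body/p"]]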
| 74 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock(object):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 195 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
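
    # --- Added usage note (not part of the original script) ---
    # Example invocation (the script file name is an assumption; adjust it to
    # the actual path in the repository):
    #   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
    #       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub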
| 74 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Any = GPTNeoXJapaneseForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
lowerCAmelCase__ : Union[str, Any] = model(_A , attention_mask=_A , use_cache=_A )
lowerCAmelCase__ : Any = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : int = model(_A , attention_mask=_A , output_hidden_states=_A )
lowerCAmelCase__ : Optional[Any] = output_from_no_past['''hidden_states'''][0]
lowerCAmelCase__ : Tuple = model(
_A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_name = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_name)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_name)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 453 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
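

# --- Added usage sketch (not part of the original file) ---
# Hedged example: configs are plain containers, so defaults can be inspected
# or overridden at construction time.
#
#   config = MgpstrConfig(max_token_length=32)
#   config.hidden_size       # 768 (default)
#   config.max_token_length  # 32 (overridden)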
| 307 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 74 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
UpperCamelCase__ : Any = "2020.9.26"
UpperCamelCase__ : List[Any] = "xcodz-dot, cclaus, dhruvmanila"
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
if not all(isinstance(SCREAMING_SNAKE_CASE_ , (float, int) ) for val in locals().values() ):
_SCREAMING_SNAKE_CASE = F"Input values must either be float or int: {list(locals().values() )}"
raise TypeError(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = ((x * distance) / (z + distance)) * scale
_SCREAMING_SNAKE_CASE = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
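

# Added worked example (illustrative numbers): with scale=10 and distance=10,
# the point (1, 2, 3) projects to x = ((1 * 10) / (3 + 10)) * 10 ≈ 7.69 and
# y = ((2 * 10) / (3 + 10)) * 10 ≈ 15.38.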
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotates a point around the given axis ('x', 'y' or 'z') by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = f"Input values except axis must either be float or int: {list(input_variables.values())}"
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 591 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel: the denoised hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer block that attends over the temporal (frame) axis of a video latent."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
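

# --- Added usage sketch (not part of the original file) ---
# Hedged shape walk-through: the block reshapes a (batch * frames, C, H, W)
# input so that attention runs over the frame axis, then restores the layout.
#
#   model = TransformerTemporalModel(in_channels=32, num_attention_heads=4, attention_head_dim=8)
#   sample = torch.randn(2 * 4, 32, 8, 8)       # batch=2, num_frames=4
#   out = model(sample, num_frames=4).sample    # same shape: (8, 32, 8, 8)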
| 74 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 193 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def _should_continue(line, indent):
    """simple docstring"""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """simple docstring"""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """simple docstring"""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """simple docstring"""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs
def check_copies(overwrite: bool = False):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 74 | 0 |
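To make the `# Copied from` machinery concrete, here is a small demonstration of the two regexes defined above on a toy comment line (the object path and replacement are made up for illustration):

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")

line = "    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Fancy"
indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
print(repr(indent))     # '    '
print(object_name)      # models.attention.BasicTransformerBlock
print(replace_pattern)  # with Basic->Fancy

obj1, obj2, option = _re_replace_pattern.search(replace_pattern.replace("with", "")).groups()
print(obj1, "->", obj2)  # Basic -> Fancy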
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but it currently doesn't (mostly a PyTorch issue);
            # in the meantime we disable it at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir), )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 369 |
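As a sanity check of the FULL_STATE_DICT branch above, the following minimal, runnable sketch performs the same save/load round trip on a plain module, with no FSDP or distributed setup (the file name is arbitrary):

import os
import tempfile

import torch

model = torch.nn.Linear(4, 2)
state_dict = model.state_dict()

with tempfile.TemporaryDirectory() as output_dir:
    output_model_file = os.path.join(output_dir, "pytorch_model.bin")
    torch.save(state_dict, output_model_file)  # what save_fsdp_model does on rank 0
    loaded = torch.load(output_model_file)
    model.load_state_dict(loaded)              # what load_fsdp_model does afterwards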
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """simple docstring"""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """simple docstring"""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 74 | 0 |
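Both tests rely on the standard pmap data layout: `replicate` adds a leading device axis to every parameter leaf, and `shard` splits the batch across that axis. A small sketch on toy arrays (it also runs on a single CPU device, where the leading axis is just 1):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n_devices = jax.device_count()

params = {"w": jnp.ones((3,))}
params = replicate(params)  # leaf shapes become (n_devices, 3)

batch = jnp.arange(n_devices * 4).reshape(n_devices * 4, 1)
batch = shard(batch)        # shape becomes (n_devices, 4, 1)

print(jax.tree_util.tree_map(lambda x: x.shape, params))
print(batch.shape)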
'''simple docstring'''

from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    """simple docstring"""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        '''simple docstring'''
        return self.__str__()

    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_2)
| 229 |
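A short usage sketch of the class above (coefficients are listed from the constant term upward):

p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
q = Polynomial(1, [0, 2])      # 2x

print(p + q)                   # 3x^2 + 2x + 1
print(p.evaluate(2))           # 3*4 + 1 = 13
print(p.derivative())          # 6x
print(p.integral(constant=5))  # 1.0x^3 + 1.0x + 5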
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
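The `_import_structure`/`_LazyModule` split defers the heavy framework imports until an exported name is actually touched. A simplified sketch of the mechanism (this is not the real `_LazyModule`, just the core idea):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Maps exported names to submodules and imports them on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            exported: submodule
            for submodule, exports in import_structure.items()
            for exported in exports
        }

    def __getattr__(self, attr):
        submodule = self._name_to_module[attr]  # unknown names raise here
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, attr)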
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """simple docstring"""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
A__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
A__ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvNextVaModelTester(self )
_SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_SCREAMING_SNAKE_CASE : int = True
if model_class.__name__ in [
*get_values(_A ),
*get_values(_A ),
]:
continue
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_A )
model.to(_A )
model.train()
_SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_A , _A , return_labels=_A )
_SCREAMING_SNAKE_CASE : Tuple = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_with_labels()
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Dict = True
if (
model_class.__name__
in [*get_values(_A ), *get_values(_A )]
or not model_class.supports_gradient_checkpointing
):
continue
_SCREAMING_SNAKE_CASE : str = model_class(_A )
model.to(_A )
model.gradient_checkpointing_enable()
model.train()
_SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(_A , _A , return_labels=_A )
_SCREAMING_SNAKE_CASE : Dict = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_A )
_SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(_A , _A ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(_A , _A , _A )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : List[str] = ConvNextVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _lowerCAmelCase ( ) -> int:
_SCREAMING_SNAKE_CASE : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(_A )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
_SCREAMING_SNAKE_CASE : List[str] = prepare_img()
_SCREAMING_SNAKE_CASE : Union[str, Any] = preprocessor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : int = model(**_A )
# verify the logits
_SCREAMING_SNAKE_CASE : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 572 |
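The shape assertion in `create_and_check_model` reflects the backbone's overall stride of 32: an H x W input yields an H/32 x W/32 feature map. A toy check of the same arithmetic with a single strided convolution (numbers arbitrary, not the real architecture):

import torch

batch_size, num_channels, image_size, hidden = 13, 3, 32, 40
pixel_values = torch.rand(batch_size, num_channels, image_size, image_size)

stride32 = torch.nn.Conv2d(num_channels, hidden, kernel_size=32, stride=32)
out = stride32(pixel_values)
assert out.shape == (batch_size, hidden, image_size // 32, image_size // 32)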
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileBertTokenizer
lowerCAmelCase_ = MobileBertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
lowerCAmelCase_ = '''google/mobilebert-uncased'''
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__SCREAMING_SNAKE_CASE : Dict = {}
for i, token in enumerate(_A ):
__SCREAMING_SNAKE_CASE : List[str] = i
__SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
__SCREAMING_SNAKE_CASE : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有''']
__SCREAMING_SNAKE_CASE : int = ''''''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE : List[Any] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 74 | 0 |
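For reference, the greedy longest-match-first behaviour these tests assert can be reproduced directly with the toy vocabulary from `setUp`:

from transformers.models.bert.tokenization_bert import WordpieceTokenizer

vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {token: i for i, token in enumerate(vocab_tokens)}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(tokenizer.tokenize("unwanted running"))   # ['un', '##want', '##ed', 'runn', '##ing']
print(tokenizer.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']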
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese(word: str) -> int:
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
| 146 |
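A toy run of `add_sub_symbol` as defined above: with the LTP word set {"身高"}, the second BERT piece of the word is re-marked as a subword so whole-word masking treats the pair as one unit:

tokens = ["身", "高", "180"]
print(add_sub_symbol(tokens, {"身高"}))  # ['身', '##高', '180']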
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a_ ( lowerCAmelCase__ ):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
def A__ ( self , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
UpperCamelCase = calib_dataset if calib_dataset is not None else self.calib_dataset
UpperCamelCase = self._remove_unused_columns(_A , description="""Calibration""" )
return DataLoader(
_A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_A , )
def A__ ( self , _SCREAMING_SNAKE_CASE=None ) -> str:
"""simple docstring"""
UpperCamelCase = self.train_dataset if calib_dataset is None else calib_dataset
UpperCamelCase = self.get_calib_dataloader(_A )
UpperCamelCase = self.model
quant_trainer.configure_model(_A , self.quant_trainer_args , calib=_A )
model.eval()
quant_trainer.enable_calibration(_A )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(_A ):
# Prediction step
UpperCamelCase = self.prediction_step(_A , _A , prediction_loss_only=_A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_A , self.quant_trainer_args )
UpperCamelCase = model
def A__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "eval" ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCamelCase = self.get_eval_dataloader(_A )
UpperCamelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
UpperCamelCase = self.compute_metrics
UpperCamelCase = None
UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCamelCase = eval_loop(
_A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
UpperCamelCase = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
UpperCamelCase = self.post_process_function(_A , _A , output.predictions )
UpperCamelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCamelCase = metrics.pop(_A )
self.log(_A )
else:
UpperCamelCase = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCamelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "test" ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.get_test_dataloader(_A )
# Temporarily disable metric computation; we will do it in the loop here.
UpperCamelCase = self.compute_metrics
UpperCamelCase = None
UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCamelCase = eval_loop(
_A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
UpperCamelCase = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCamelCase = self.post_process_function(_A , _A , output.predictions , """predict""" )
UpperCamelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCamelCase = metrics.pop(_A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
def A__ ( self , _SCREAMING_SNAKE_CASE="./" ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.eval_dataset
UpperCamelCase = self.get_eval_dataloader(_A )
UpperCamelCase = next(iter(_A ) )
# choose the saving device once so all tensors are placed consistently
UpperCamelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
UpperCamelCase = tuple(v.to(_A ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
UpperCamelCase = True
UpperCamelCase = self.model.to(_A )
model.eval()
model.float()
UpperCamelCase = model.module if hasattr(_A , """module""" ) else model
quant_trainer.configure_model(_A , self.quant_trainer_args )
UpperCamelCase = os.path.join(_A , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
UpperCamelCase = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
_A , _A , _A , export_params=_A , opset_version=13 , do_constant_folding=_A , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=_A , )
logger.info("""onnx export finished""" )
| 301 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase_ = 1_00_00
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase_ = ParquetConfig
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE : Tuple = data_files
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__SCREAMING_SNAKE_CASE : int = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_A ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
break
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
return splits
def UpperCAmelCase__ ( self : str , _A : pa.Table ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
raise
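A standalone sketch of the read path the builder above relies on: stream a Parquet file in record batches and wrap each batch in a pyarrow Table (the file name `demo.parquet` is illustrative):
import pyarrow as pa
import pyarrow.parquet as pq
table = pa.table({"id": [1, 2, 3, 4], "text": ["a", "b", "c", "d"]})
pq.write_table(table, "demo.parquet")
parquet_file = pq.ParquetFile("demo.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=2)):
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)  # two batches of two rows each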
| 74 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowercase ( lowerCAmelCase__, lowerCAmelCase__, unittest.TestCase ):
'''simple docstring'''
_A : int = AutoencoderKL
_A : str = '''sample'''
_A : str = 1e-2
@property
def A_ ( self : List[str] ):
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
return {"sample": image}
@property
def A_ ( self : Any ):
return (3, 32, 32)
@property
def A_ ( self : Optional[Any] ):
return (3, 32, 32)
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def A_ ( self : Optional[Any] ):
pass
def A_ ( self : Optional[int] ):
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = self.model_class(**_A )
model.to(_A )
assert not model.is_gradient_checkpointing and model.training
UpperCamelCase__ = model(**_A ).sample
# run the backward pass on the model. For simplicity, we don't compute a real
# loss and instead backprop on the mean difference to randomly drawn targets
model.zero_grad()
UpperCamelCase__ = torch.randn_like(_A )
UpperCamelCase__ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCamelCase__ = self.model_class(**_A )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_A )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCamelCase__ = model_a(**_A ).sample
# run the backward pass on the model. For simplicity, we don't compute a real
# loss and instead backprop on the mean difference to randomly drawn targets
model_a.zero_grad()
UpperCamelCase__ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
UpperCamelCase__ = dict(model.named_parameters() )
UpperCamelCase__ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def A_ ( self : int ):
UpperCamelCase__ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCamelCase__ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
UpperCamelCase__ = model.to(_A )
model.eval()
if torch_device == "mps":
UpperCamelCase__ = torch.manual_seed(0 )
else:
UpperCamelCase__ = torch.Generator(device=_A ).manual_seed(0 )
UpperCamelCase__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase__ = image.to(_A )
with torch.no_grad():
UpperCamelCase__ = model(_A , sample_posterior=_A , generator=_A ).sample
UpperCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCamelCase__ = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
UpperCamelCase__ = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
UpperCamelCase__ = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Tuple , _a : Dict , _a : Any ):
return F"""gaussian_noise_s={seed}_shape={"_".join([str(_A ) for s in shape] )}.npy"""
def A_ ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : int , _a : str=0 , _a : Any=(4, 3, 512, 512) , _a : int=False ):
UpperCamelCase__ = torch.floataa if fpaa else torch.floataa
UpperCamelCase__ = torch.from_numpy(load_hf_numpy(self.get_file_format(_A , _A ) ) ).to(_A ).to(_A )
return image
def A_ ( self : Tuple , _a : Any="CompVis/stable-diffusion-v1-4" , _a : Dict=False ):
UpperCamelCase__ = '''fp16''' if fpaa else None
UpperCamelCase__ = torch.floataa if fpaa else torch.floataa
UpperCamelCase__ = AutoencoderKL.from_pretrained(
_A , subfolder='''vae''' , torch_dtype=_A , revision=_A , )
model.to(_A ).eval()
return model
def A_ ( self : Dict , _a : Optional[int]=0 ):
if torch_device == "mps":
return torch.manual_seed(_A )
return torch.Generator(device=_A ).manual_seed(_A )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Optional[Any] , _a : str , _a : Optional[int] , _a : Dict ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(_A )
UpperCamelCase__ = self.get_generator(_A )
with torch.no_grad():
UpperCamelCase__ = model(_A , generator=_A , sample_posterior=_A ).sample
assert sample.shape == image.shape
UpperCamelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_A , _A , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Tuple , _a : Tuple , _a : Optional[int] ):
UpperCamelCase__ = self.get_sd_vae_model(fpaa=_A )
UpperCamelCase__ = self.get_sd_image(_A , fpaa=_A )
UpperCamelCase__ = self.get_generator(_A )
with torch.no_grad():
UpperCamelCase__ = model(_A , generator=_A , sample_posterior=_A ).sample
assert sample.shape == image.shape
UpperCamelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(_A )
assert torch_all_close(_A , _A , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : int , _a : Dict , _a : Any , _a : List[Any] ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(_A )
with torch.no_grad():
UpperCamelCase__ = model(_A ).sample
assert sample.shape == image.shape
UpperCamelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_A , _A , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Tuple , _a : Union[str, Any] , _a : Optional[int] ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(_A , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCamelCase__ = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCamelCase__ = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCamelCase__ = torch.tensor(_A )
assert torch_all_close(_A , _A , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : List[str] , _a : Tuple , _a : Optional[int] ):
UpperCamelCase__ = self.get_sd_vae_model(fpaa=_A )
UpperCamelCase__ = self.get_sd_image(_A , shape=(3, 4, 64, 64) , fpaa=_A )
with torch.no_grad():
UpperCamelCase__ = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCamelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(_A )
assert torch_all_close(_A , _A , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Union[str, Any] , _a : List[Any] ):
UpperCamelCase__ = self.get_sd_vae_model(fpaa=_A )
UpperCamelCase__ = self.get_sd_image(_A , shape=(3, 4, 64, 64) , fpaa=_A )
with torch.no_grad():
UpperCamelCase__ = model.decode(_A ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCamelCase__ = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_A , _A , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Dict , _a : Union[str, Any] ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(_A , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCamelCase__ = model.decode(_A ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCamelCase__ = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_A , _A , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A_ ( self : List[str] , _a : Dict , _a : Any ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(_A )
UpperCamelCase__ = self.get_generator(_A )
with torch.no_grad():
UpperCamelCase__ = model.encode(_A ).latent_dist
UpperCamelCase__ = dist.sample(generator=_A )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCamelCase__ = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCamelCase__ = torch.tensor(_A )
UpperCamelCase__ = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(_A , _A , atol=_A )
| 240 |
from math import isclose, sqrt
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x
__SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
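# A note on the trig used above: writing the normal's gradient as tan(theta),
# `sa` = sin(2*theta) = 2*tan(theta) / (1 + tan(theta)**2) and
# `ca` = cos(2*theta) = (1 - tan(theta)**2) / (1 + tan(theta)**2); reflecting a
# beam of gradient tan(phi) about the normal gives gradient
# tan(2*theta - phi) = (sa - ca*tan(phi)) / (ca + sa*tan(phi)),
# which is exactly the expression for the outgoing gradient.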
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
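# Substituting the reflected line y = point_y + m*(x - point_x), with
# m = outgoing_gradient, into y^2 + 4x^2 = 100 and collecting powers of x gives
#   A = m**2 + 4
#   B = 2*m*(point_y - m*point_x)
#   C = (point_y - m*point_x)**2 - 100
# i.e. the quadratic, linear and constant terms computed below.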
__SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4
__SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
__SCREAMING_SNAKE_CASE : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__SCREAMING_SNAKE_CASE : int = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus
__SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a__ ( snake_case = 1.4 , snake_case = -9.6 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : float = first_x_coord
__SCREAMING_SNAKE_CASE : float = first_y_coord
__SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 74 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Optional[Any] = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = ['LayoutLMv2FeatureExtractor']
snake_case_ : List[Any] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 195 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : int = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : int = min_resolution
__SCREAMING_SNAKE_CASE : Optional[int] = max_resolution
__SCREAMING_SNAKE_CASE : List[Any] = do_resize
__SCREAMING_SNAKE_CASE : Union[str, Any] = size
__SCREAMING_SNAKE_CASE : str = do_center_crop
__SCREAMING_SNAKE_CASE : Any = crop_size
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''crop_size''' ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 74 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_UpperCamelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class __a ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCamelCase : Dict = 10000
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Dict = None
class __a ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ParquetConfig
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
lowerCAmelCase__ : Tuple = data_files
if isinstance(_A , _A ):
lowerCAmelCase__ : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase__ : List[Any] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCAmelCase__ : int = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
lowerCAmelCase__ : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase__ : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_A ):
with open(_A , "rb" ) as f:
lowerCAmelCase__ : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
break
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={"files": files} ) )
return splits
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase__ : str = table_cast(_A , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
with open(_A , "rb" ) as f:
lowerCAmelCase__ : str = pq.ParquetFile(_A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCAmelCase__ : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(_A )
except ValueError as e:
logger.error(F"""Failed to read file \'{file}\' with error {type(_A )}: {e}""" )
raise
| 453 |
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )]
# initialize interval's left pointer and right pointer
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0
for i in range(1 , len(snake_case ) ):
# case when current index is inside the interval
if i <= right_pointer:
__SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
__SCREAMING_SNAKE_CASE : Dict = min_edge
while go_next(snake_case , snake_case , snake_case ):
z_result[i] += 1
# if the new index's result extends the interval further to the right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1
return z_result
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index marks the start of a substring
# equal to the pattern string
if val >= len(snake_case ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
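A quick worked example of the two helpers above, assuming the names `z_function` and `find_pattern` that the function bodies themselves reference; the sample strings are illustrative:
print(z_function("aaab"))  # [0, 2, 1, 0] -- z[0] is left at 0 by convention
print(find_pattern("abr", "abracadabra"))  # 2, the pattern occurs twice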
| 74 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase ( lowerCAmelCase__ ):
_a = "M-CLIP"
def __init__( self , _a=1024 , _a=768 , **_a ) -> Any:
_A : Tuple = transformerDimSize
_A : Optional[Any] = imageDimSize
super().__init__(**_A )
class lowercase ( lowerCAmelCase__ ):
_a = MCLIPConfig
def __init__( self , _a , *_a , **_a ) -> List[str]:
super().__init__(_A , *_A , **_A )
_A : List[Any] = XLMRobertaModel(_A )
_A : str = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def a__ ( self , _a , _a ) -> Union[str, Any]:
_A : int = self.transformer(input_ids=_A , attention_mask=_A )[0]
_A : List[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_A ), embs
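A standalone sketch of the masked mean-pooling used in the forward pass above: sum the token embeddings where the attention mask is 1, then divide by the number of unmasked tokens (shapes are illustrative):
import torch
embs = torch.randn(2, 4, 8)  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # padding masked out
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8])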
| 307 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
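The `_LazyModule` assignment above defers the heavy torch/TF imports until an attribute is first accessed. A simplified, dependency-free stand-in for that pattern (not the actual Transformers implementation):
import importlib
import types
class LazyModule(types.ModuleType):
    """Resolve attributes to real submodule imports on first access."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])  # imported only now
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value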
| 74 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _a (lowerCAmelCase__):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'wavlm'
def __init__( self , A__=32 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=0.1 , A__=0.02 , A__=1E-5 , A__="group" , A__="gelu" , A__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , A__=(5, 2, 2, 2, 2, 2, 2) , A__=(10, 3, 3, 3, 3, 2, 2) , A__=False , A__=1_28 , A__=16 , A__=3_20 , A__=8_00 , A__=False , A__=True , A__=0.05 , A__=10 , A__=2 , A__=0.0 , A__=10 , A__=3_20 , A__=2 , A__=0.1 , A__=1_00 , A__=2_56 , A__=2_56 , A__=0.1 , A__="mean" , A__=False , A__=False , A__=2_56 , A__=(5_12, 5_12, 5_12, 5_12, 15_00) , A__=(5, 3, 3, 1, 1) , A__=(1, 2, 3, 1, 1) , A__=5_12 , A__=80 , A__=0 , A__=1 , A__=2 , A__=False , A__=3 , A__=2 , A__=3 , A__=None , **A__ , ) -> Any:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = feat_extract_norm
_SCREAMING_SNAKE_CASE = feat_extract_activation
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = conv_bias
_SCREAMING_SNAKE_CASE = num_buckets
_SCREAMING_SNAKE_CASE = max_bucket_distance
_SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE = len(self.conv_dim )
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation_dropout
_SCREAMING_SNAKE_CASE = feat_proj_dropout
_SCREAMING_SNAKE_CASE = final_dropout
_SCREAMING_SNAKE_CASE = layerdrop
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_ctc_classes
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = do_stable_layer_norm
_SCREAMING_SNAKE_CASE = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE = apply_spec_augment
_SCREAMING_SNAKE_CASE = mask_time_prob
_SCREAMING_SNAKE_CASE = mask_time_length
_SCREAMING_SNAKE_CASE = mask_time_min_masks
_SCREAMING_SNAKE_CASE = mask_feature_prob
_SCREAMING_SNAKE_CASE = mask_feature_length
# parameters for pretraining with codevector quantized representations
_SCREAMING_SNAKE_CASE = num_codevectors_per_group
_SCREAMING_SNAKE_CASE = num_codevector_groups
_SCREAMING_SNAKE_CASE = contrastive_logits_temperature
_SCREAMING_SNAKE_CASE = num_negatives
_SCREAMING_SNAKE_CASE = codevector_dim
_SCREAMING_SNAKE_CASE = proj_codevector_dim
_SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
_SCREAMING_SNAKE_CASE = ctc_loss_reduction
_SCREAMING_SNAKE_CASE = ctc_zero_infinity
# adapter
_SCREAMING_SNAKE_CASE = add_adapter
_SCREAMING_SNAKE_CASE = adapter_kernel_size
_SCREAMING_SNAKE_CASE = adapter_stride
_SCREAMING_SNAKE_CASE = num_adapter_layers
_SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def UpperCamelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
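The property above multiplies the convolutional strides together, giving the feature encoder's overall downsampling factor. With the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)`:
import functools
import operator
conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 input samples per output frame
print(ratio / 16_000 * 1_000)  # 20.0 ms per frame for 16 kHz audio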
| 591 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
__SCREAMING_SNAKE_CASE : Optional[Any] = 12
__SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
__SCREAMING_SNAKE_CASE : int = 4_096
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : Optional[int] = 24
__SCREAMING_SNAKE_CASE : Optional[int] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = 336
__SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Any = 768
return config
def a__ ( snake_case ):
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def a__ ( snake_case , snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
__SCREAMING_SNAKE_CASE : List[Any] = key_split[3]
__SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[
:dim
]
__SCREAMING_SNAKE_CASE : Tuple = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:
]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : str = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:]
elif key.startswith('''mit''' ):
__SCREAMING_SNAKE_CASE : List[str] = key_split[2]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : str = val[:dim, :]
__SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Any = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2]
__SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim, :]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : int = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__SCREAMING_SNAKE_CASE : int = val.T
__SCREAMING_SNAKE_CASE : Union[str, Any] = val
return orig_state_dict
def a__ ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : int = np.load(snake_case )
return list(snake_case )
def a__ ( snake_case , snake_case=None , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name]
__SCREAMING_SNAKE_CASE : Any = 8
if "16-frames" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 16
elif "shot" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 32
__SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin'''
gdown.cached_download(snake_case , snake_case , quiet=snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model''']
else:
__SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case )
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
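

# --- Illustrative sketch (not part of the conversion script): how a converted
# checkpoint would typically be consumed downstream. The model id and the label
# prompts below are assumptions for demonstration only.
def _xclip_inference_demo(video_frames):
    from transformers import XCLIPModel, XCLIPProcessor

    model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
    processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"],
        videos=video_frames,
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        outputs = model(**inputs)
    # same post-processing as the verification step above
    return outputs.logits_per_video.softmax(dim=1)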
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
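

# --- Illustrative sketch (not part of the script): the config-key renaming above,
# shown in isolation. Unknown keys pass through untouched; the sample keys in the
# usage comment are hypothetical.
def _rename_config_keys(config: dict, renames: dict) -> dict:
    return {renames.get(key, key): value for key, value in config.items()}


# e.g. _rename_config_keys({"image_size": 64, "act_fn": "silu"}, {"image_size": "sample_size"})
# returns {"sample_size": 64, "act_fn": "silu"}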
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
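
# --- Usage sketch (assumed invocation; fire exposes the function's positional args):
#     python minify.py <src_dir> <dest_dir> <n>
# e.g. keep only the first 5 lines of every file in ./data, writing results to ./data_mini:
#     python minify.py ./data ./data_mini 5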
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
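
# --- Illustrative sketch (not part of this module): _LazyModule defers the heavy
# torch-backed imports until an attribute is first accessed. A minimal, hypothetical
# equivalent of the mechanism, for intuition only:
#
#     import importlib
#
#     class _Lazy:
#         def __init__(self, pkg_name, structure):
#             self._pkg_name, self._structure = pkg_name, structure
#
#         def __getattr__(self, attr):
#             for submodule, symbols in self._structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module(f".{submodule}", self._pkg_name)
#                     return getattr(module, attr)
#             raise AttributeError(attr)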
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints where one sequence is a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
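
# --- Illustrative sketch (not part of the tests): a DisjunctiveConstraint is meant to
# be handed to `generate` for constrained beam search. The model name and phrases here
# are hypothetical.
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#     from transformers.generation import DisjunctiveConstraint
#
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     # force the output to contain exactly one of several surface forms of a word
#     word_ids = tokenizer(["rain", "raining"], add_special_tokens=False).input_ids
#     constraint = DisjunctiveConstraint(word_ids)
#     out = model.generate(
#         **tokenizer("the weather is", return_tensors="pt"),
#         constraints=[constraint],
#         num_beams=4,
#     )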
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}
    )
    max_gumbel_temperature: float = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: float = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: float = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Data collator that dynamically pads the received audio inputs and samples masked time indices."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(torch.long)

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays the gumbel softmax temperature during pretraining."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
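

# --- Illustrative sketch (not part of the script): the gumbel temperature schedule
# applied in training_step above is an exponential decay clipped at a floor.
def _gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    # with these defaults (the ModelArguments defaults above) the temperature
    # halves roughly every ln(2) / (1 - decay) ~ 139k update steps
    return max(max_temp * decay**step, min_temp)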
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
# Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )
# only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch
# load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
# filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )
    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)
# normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
trainer.train()
if __name__ == "__main__":
main()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
lowerCAmelCase_ = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCAmelCase_ = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
lowerCAmelCase_ = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
lowerCAmelCase_ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class MaskGenerator:
    """Generates a random boolean mask of image patches for SimMIM-style masked image modeling."""
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
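

# --- Illustrative sketch (not part of the original script): shape arithmetic for the
# defaults above. A 192px input with 32px mask patches over a 4px model patch yields
# a 6x6 patch grid upsampled by 8, i.e. a flat mask of (6 * 8) ** 2 = 2304 entries.
def _mask_generator_demo():
    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()
    assert mask.shape == (2304,)
    assert int(mask.sum()) == 22 * 8 * 8  # ceil(36 * 0.6) = 22 masked patches of 8x8 model patches
    return mask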
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level()
logger.setLevel(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
__SCREAMING_SNAKE_CASE : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0:
__SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split )
__SCREAMING_SNAKE_CASE : int = split['''train''']
__SCREAMING_SNAKE_CASE : Dict = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE : List[Any] = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case )
else:
__SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(snake_case , '''decoder_type''' ):
__SCREAMING_SNAKE_CASE : Any = '''simmim'''
# adapt config
__SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size
__SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__SCREAMING_SNAKE_CASE : str = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case )
else:
__SCREAMING_SNAKE_CASE : List[Any] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case )
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
[
Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
    mask_generator = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples):
        """Apply the transforms to a batch of images and create the corresponding patch mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case )
# Initialize our trainer
__SCREAMING_SNAKE_CASE : List[str] = Trainer(
model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE : int = last_checkpoint
__SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case )
trainer.save_metrics('''eval''' , snake_case )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
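
# --- Usage sketch (hypothetical invocation; the flags mirror the dataclasses above):
#     python run_mim.py \
#         --model_type vit \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-out \
#         --do_train --do_eval \
#         --mask_patch_size 32 --mask_ratio 0.6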
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
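
# --- Usage sketch (assumed invocation): these checks are meant to be launched on
# several processes at once, e.g. two processes on a single machine:
#     accelerate launch --num_processes 2 test_ops.py
# with two processes, gather() above collects [1.0, 2.0, 3.0, 4.0] across both ranks.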
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a Data2Vec vision model."""

    model_type = "data2vec-vision"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-4
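
# --- Illustrative sketch (not part of this module; the call shape is an assumption):
# the OnnxConfig above declares a single 4D "pixel_values" input and a validation
# tolerance of 1e-4, which the transformers.onnx export machinery consumes roughly as:
#     onnx_config = Data2VecVisionOnnxConfig(model.config)
#     # the exporter builds dummy inputs matching onnx_config.inputs and then checks
#     # ONNX vs. PyTorch outputs to within onnx_config.atol_for_validation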
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
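

# --- Illustrative sketch (not part of the original script): nearest-neighbour lookup
# on a tiny synthetic image, avoiding the file dependency above. The 2x2 -> 4x4
# example is hypothetical.
def _nearest_neighbour_demo():
    img = np.arange(12, dtype=np.uint8).reshape((2, 2, 3))  # tiny 2x2 RGB image
    resized = NearestNeighbour(img, 4, 4)
    resized.process()
    # with a 0.5 ratio, each source pixel is replicated into a 2x2 output block
    assert (resized.output[0][0] == img[0][0]).all()
    return resized.output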
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
"""simple docstring"""
def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : int = is_training
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
__SCREAMING_SNAKE_CASE : str = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : Any = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = num_labels
__SCREAMING_SNAKE_CASE : List[Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(
_A , attention_mask=_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.num_choices
__SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
_A , attention_mask=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowerCAmelCase_ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase_ = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
__SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_A )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A )
__SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A )
__SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
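# The device-change test above follows a generic trace/save/reload pattern. A minimal
# standalone sketch of that pattern (the tiny module and file name are illustrative,
# not part of the test suite):
import os
import tempfile

import torch


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


example_input = torch.randn(1, 4)
traced = torch.jit.trace(TinyModel().eval(), example_input)  # record the graph on CPU
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")  # map_location retargets the device
    loaded(example_input)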
| 74 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
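# A minimal usage sketch for the tokenizer above, assuming network access to the
# pretrained "facebook/xglm-564M" checkpoint referenced in the vocab map:
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tokenizer("Hello world").input_ids  # </s> (id 2) is prepended by build_inputs_with_special_tokens
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))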
| 301 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None
def logger():
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """simple docstring"""

    def __init__(self, lock_file):
        """simple docstring"""
        self.lock_file = lock_file
        return None

    def __str__(self):
        """simple docstring"""
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """simple docstring"""

    def __init__(self, lock):
        """simple docstring"""
        self.lock = lock
        return None

    def __enter__(self):
        """simple docstring"""
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        """simple docstring"""
        return self._lock_file

    @property
    def timeout(self):
        """simple docstring"""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        """simple docstring"""
        self._timeout = float(value)
        return None

    def _acquire(self):
        """simple docstring"""
        raise NotImplementedError()

    def _release(self):
        """simple docstring"""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """simple docstring"""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """simple docstring"""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """simple docstring"""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        """simple docstring"""
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """simple docstring"""
        self.release()
        return None

    def __del__(self):
        """simple docstring"""
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """simple docstring"""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """simple docstring"""

    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        """simple docstring"""
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
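# A short usage sketch for the FileLock alias resolved above (the lock path is
# illustrative). Acquisition is reentrant thanks to the counter in BaseFileLock, and
# the context manager releases on exit:
lock = FileLock("my_resource.txt.lock", timeout=10)
with lock:
    # exclusive section: only one process holding this lock file gets here
    with open("my_resource.txt", "a") as f:
        f.write("guarded write\n")
with lock:
    with lock:  # nested acquisition from the same object does not deadlock
        pass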
| 74 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """simple docstring"""
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """simple docstring"""
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """simple docstring"""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
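# The pop-and-reinsert renaming loop above is used three times; the same pattern on a
# toy state dict (the keys here are illustrative) makes the mapping visible:
toy_state_dict = {"visual_encoder.blocks.0.attn.qkv.weight": 1, "encoder.pos_embed": 2}
for key in list(toy_state_dict):
    value = toy_state_dict.pop(key)
    toy_state_dict[rename_key(key)] = value
print(toy_state_dict)
# {'vision_model.encoder.layers.0.self_attn.qkv.weight': 1, 'embeddings.position_embedding': 2}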
| 240 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """simple docstring"""

    def __init__(self, **kwargs):
        """simple docstring"""
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        """simple docstring"""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """simple docstring"""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
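# A minimal usage sketch for the feature extractor above; the HTML string is illustrative:
html_string = "<html><body><h1>Title</h1><p>First paragraph.</p><p>Second paragraph.</p></body></html>"
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'First paragraph.', 'Second paragraph.']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p[1]', '/html/body/p[2]']]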
| 74 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
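# The _LazyModule above defers the real import until first attribute access. A toy
# module-level __getattr__ (PEP 562) shows the same mechanism in miniature:
import importlib

_lazy_map = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):  # only invoked when `name` isn't found in this module
    if name in _lazy_map:
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(name)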
| 195 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """simple docstring"""
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys())
    new_keys = list(our_model.state_dict().keys())
    print(len(og_keys), len(new_keys))
    for i in range(len(og_keys)):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights)

    x = torch.randn((2, 3, 224, 224))
    out1 = from_model(x)
    out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
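# convert_weight_and_push maps weights purely by key order (og_keys[i] -> new_keys[i]).
# A toy sketch making that positional assumption explicit (names are illustrative):
from collections import OrderedDict

src = OrderedDict([("c1.weight", 1), ("c1.bias", 2)])
dst_keys = ["conv.weight", "conv.bias"]  # must enumerate in the same order as src
remapped = OrderedDict((dst_keys[i], value) for i, (key, value) in enumerate(src.items()))
print(remapped)  # OrderedDict([('conv.weight', 1), ('conv.bias', 2)])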
| 74 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        """simple docstring"""
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_image(self):
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
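# ImageGPT's processor quantizes each normalized pixel to its nearest color cluster and
# feeds the cluster indices as input_ids. A minimal numpy sketch of that nearest-cluster
# step (a simplification of the real implementation, with toy values):
import numpy as np

clusters = np.asarray([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])  # (n_clusters, 3)
pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])           # (n_pixels, 3)
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = distances.argmin(axis=1)
print(input_ids)  # [0 1]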
| 453 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
_A : List[str] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
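# The settings asserted in test_large_tokenizer_settings hinge on the Pegasus id offset:
# sentencepiece piece ids are shifted by `offset` (103) to reserve room for pad/eos and
# the <mask_*> specials. A small arithmetic sketch of the identity the test asserts:
offset = 103
pad_token_id, eos_token_id = 0, 1
unk_token_id = offset + 2  # == 105, exactly the `tokenizer.offset + 2` assert above
print(unk_token_id)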
| 307 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """simple docstring"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """simple docstring"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
return selected_warnings
if __name__ == "__main__":
def list_str(values):
    """simple docstring"""
    return values.split(",")

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
args = parser.parse_args()
from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
selected_warnings = extract_warnings(args.output_dir, args.targets)
selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
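# parse_line keys off indentation: warning bodies are indented, and any non-indented
# line flushes the buffer. A tiny synthetic log (content is illustrative) showing which
# entries survive the `targets` filter:
log = [
    "src/a.py",
    "  src/a.py:10: DeprecationWarning: old API",
    "src/b.py",
    "  src/b.py:20: SyntaxWarning: odd literal",
    "done",
]
targets, buffer, selected = ["DeprecationWarning"], [], set()
for line in log:
    if not line.startswith(" "):
        if buffer:
            warning = "\n".join(buffer)
            if any(f": {x}: " in warning for x in targets):
                selected.add(warning)
            buffer.clear()
    else:
        buffer.append(line.strip())
print(selected)  # {'src/a.py:10: DeprecationWarning: old API'}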
| 74 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    """simple docstring"""

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
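# A compact usage sketch of the update protocol exercised above: each update() call
# returns a (stepped, completed, reset) triple while current_seq tracks the prefix:
dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.current_seq, completed)  # [1, 2, 4] True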
| 591 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """simple docstring"""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        """simple docstring"""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        """simple docstring"""
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
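# The forward pass above is a pure shape shuffle around the attention blocks:
# (batch*frames, C, H, W) -> (batch*H*W, frames, C) for temporal attention, then back.
# A standalone round-trip check with toy sizes (no attention applied, so the output
# must equal the input exactly):
import torch

batch_size, num_frames, channel, height, width = 2, 4, 8, 3, 3
x = torch.randn(batch_size * num_frames, channel, height, width)
h = x[None, :].reshape(batch_size, num_frames, channel, height, width).permute(0, 2, 1, 3, 4)
h = h.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
# ... the BasicTransformerBlocks would attend over the num_frames axis here ...
h = h[None, None, :].reshape(batch_size, height, width, channel, num_frames).permute(0, 3, 4, 1, 2)
h = h.contiguous().reshape(batch_size * num_frames, channel, height, width)
assert torch.equal(x, h)  # the reshape/permute round-trip is lossless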
| 74 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 193 |
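A quick usage sketch for the wrapper above (hypothetical loop): in a multi-process run every rank iterates, but only the local main process renders the bar.

for step in tqdm(True, range(1_000), desc="training steps"):
    pass  # per-step work would go here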
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def _should_continue(line, indent):
    # Keep consuming lines while they stay indented; blank lines and the closing paren of a signature also continue.
    return line.startswith(indent) or len(line) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, F"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, F"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(F" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    return "".join(lines[start_index:line_index])
_re_copy_warning = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_re_replace_pattern = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_re_fill_pattern = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent(code):
    """Return the indentation of the first non-empty line in `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to `code`, handling an optional leading indent."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = F"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content if `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(F"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code = "".join(lines[start_index:line_index])

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(F"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 74 | 0 |
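For context, the script above enforces diffusers' `# Copied from` convention. The hypothetical lines below show the two comment forms `_re_copy_warning` recognizes, a verbatim copy and a copy with a rename pattern (`MyBlock` is an illustrative name, not a real class):

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

examples = [
    "    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps",
    "    # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock",
]
for line in examples:
    indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
    print(object_name, "|", replace_pattern or "<verbatim copy>")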
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    # Cosine similarity between every image embedding and every concept embedding.
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 369 |
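The scoring in both forward paths above reduces to one comparison: cosine similarity of the projected image embedding against each concept embedding, minus a per-concept threshold. A toy demonstration with made-up shapes and threshold values:

import torch
import torch.nn.functional as F

image_embeds = torch.randn(2, 4)             # 2 images, 4-dim projection (toy)
concept_embeds = torch.randn(3, 4)           # 3 concepts (toy)
thresholds = torch.tensor([0.5, 0.5, 0.5])   # per-concept thresholds (assumed values)

scores = F.normalize(image_embeds) @ F.normalize(concept_embeds).t() - thresholds
print(torch.any(scores > 0, dim=1))          # one flag per image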
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(F"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(F"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 74 | 0 |
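Both tests follow the standard Flax data-parallel recipe: `replicate` broadcasts the parameter pytree across devices, `shard` splits the batch along a new leading device axis, and `pmap` maps over that axis. A minimal sketch of the pattern, independent of the pipeline (toy parameters and batch):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((3,))}                             # toy "parameters"
batch = jnp.arange(jax.device_count() * 2, dtype=jnp.float32).reshape(-1, 1)

replicated = replicate(params)  # leading axis of size jax.device_count() on every leaf
sharded = shard(batch)          # (num_devices, per_device_batch, 1)

@jax.pmap
def apply(p, x):
    return x * p["w"].sum()

out = apply(replicated, sharded)  # (num_devices, per_device_batch, 1)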
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229 |
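The `_import_structure` / `_LazyModule` dance above defers heavy imports (torch, TF, Flax) until an attribute is first accessed. A stripped-down illustration of the mechanism; the real implementation lives in `transformers.utils`, and this toy class only mirrors the idea:

import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr in self._name_to_module:
            module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache so the next access is a plain lookup
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")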
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
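In practice the laziness is invisible to callers: the heavy submodule is imported only when its attribute is first touched, e.g.

from transformers import LayoutLMv2Model  # triggers the lazy load of modeling_layoutlmv2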