| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    # Collect the provided CSV files under their respective dataset splits.
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
    return results
if __name__ == "__main__":
main()
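# A hypothetical invocation of the script above (the file name, model, and CSV
# paths are illustrative assumptions, not part of the original example):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./tf_clf_out --do_train --do_eval
#
# Each CSV must contain the label column plus one or two text columns, since
# get_tfds() only handles single-sentence and sentence-pair inputs.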
| 287 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # Handle negative values of the initial intensity.
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # Handle angle values outside the allowed range.
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
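    # A minimal sanity check (added sketch): Malus's law I = I0 * cos^2(theta)
    # gives full transmission at 0 degrees, half at 45, and ~0 at 90.
    assert malus_law(100.0, 0) == 100.0
    assert abs(malus_law(100.0, 45) - 50.0) < 1e-9
    assert abs(malus_law(100.0, 90)) < 1e-9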
| 249 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
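# A rough usage sketch (assumes the google/bigbird-roberta-base checkpoint and
# its spiece.model are reachable; the printed ids are not verified here):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   enc = tokenizer("Hello world")
#   print(enc["input_ids"])                   # [CLS] ... [SEP] wrapped piece ids
#   print(tokenizer.decode(enc["input_ids"]))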
| 27 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 27 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse through dotted names ("encoder.layer.0.weight") down to the leaf module.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check for tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
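# A rough usage sketch (requires a CUDA machine with `bitsandbytes` installed;
# the model name is illustrative). Loading with a quantization config exercises
# this module: get_keys_to_not_convert() keeps tied/output weights in full
# precision, then replace_with_bnb_linear() swaps eligible nn.Linear layers for
# bnb.nn.Linear8bitLt (or Linear4bit):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)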
| 42 |
"""simple docstring"""
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        # Each term is the product of all previous terms plus one, computed
        # from the previous term as num * (num - 1) + 1.
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
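    # Sanity check for the first few terms (added sketch): the sequence starts
    # 2, 3, 7, 43, 1807, ...
    assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]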
| 165 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub used when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
] , )
| 346 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 346 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 342 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 342 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
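# A small composition sketch using the defaults defined above:
#
#   config = InstructBlipConfig()
#   config.vision_config.hidden_size            # 1408
#   config.qformer_config.encoder_hidden_size   # 1408, wired to the vision tower
#   config.num_query_tokens                     # 32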
| 164 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: zero mean, unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
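if __name__ == "__main__":
    # Worked example (added sketch): min-max scaling maps the smallest value to
    # 0.0 and the largest to 1.0; standardization recenters on the mean in
    # units of the sample standard deviation.
    assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
    assert standardization([2, 4, 6]) == [-1.0, 0.0, 1.0]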
| 164 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository"""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
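# A rough usage sketch (the repo_info would come from
# huggingface_hub.HfApi().dataset_info(...); the file path is an assumption
# for illustration):
#
#   fs = HfFileSystem(repo_info=repo_info, token=None)
#   fs.ls("")                            # top-level files and directories
#   with fs.open("data/train.csv") as f:
#       head = f.read(100)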
| 252 |
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 98 | 0 |
def add(first: int, second: int) -> int:
    # Iteratively add using only bitwise operators: AND extracts the carry
    # bits, XOR adds without carrying, and the carry is shifted left one place.
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
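# Worked trace (a sketch) of add(5, 3):
#   carry = 5 & 3 = 1,  first = 5 ^ 3 = 6,  second = 1 << 1 = 2
#   carry = 6 & 2 = 2,  first = 6 ^ 2 = 4,  second = 2 << 1 = 4
#   carry = 4 & 4 = 4,  first = 4 ^ 4 = 0,  second = 4 << 1 = 8
#   carry = 0 & 8 = 0,  first = 0 ^ 8 = 8,  second = 0  ->  returns 8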
| 352 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
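# Example invocation sketch (script name and all paths below are placeholders, not
# taken from the original):
#   python convert_original_controlnet_to_diffusers.py \
#     --checkpoint_path ./control_sd15_canny.pth \
#     --original_config_file ./cldm_v15.yaml \
#     --dump_path ./controlnet-canny \
#     --to_safetensors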
| 102 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: exclude the current element
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include the current element
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
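# For illustration (derived by tracing the recursion above): for seq = [1, 2] the
# exclude-first ordering prints [], [2], [1], [1, 2] -- 2**n subsequences in total.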
| 66 |
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
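# Implementation note: list.pop(0) is O(n) per dequeue. A minimal variant of the
# distance search using collections.deque for O(1) pops (a sketch, not part of the
# original module):
from collections import deque
def bfs_distance_deque(graph: dict, start, target) -> int:
    if start not in graph or target not in graph:
        return -1
    queue = deque([(start, 0)])
    visited = {start}
    while queue:
        node, distance = queue.popleft()
        if node == target:
            return distance
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append((adjacent, distance + 1))
    return -1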
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 121 | 0 |
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the strings in `separated`, placing `separator` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
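# Usage sketch (example values assumed; the original's doctests were stripped):
#   join("-", ["apple", "banana", "cherry"])  -> "apple-banana-cherry"
#   join(" ", ["You", "are", "amazing!"])     -> "You are amazing!"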
if __name__ == "__main__":
from doctest import testmod
testmod()
| 359 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VECTOR_SUM(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def ATTEN(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
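# Hypothetical usage sketch (batch names assumed, not from the original): W_query and
# W_supports are tokenizer outputs; W_supports additionally carries "sizes" and the
# entity start/end marker token ids. The forward pass returns, per query, a softmax
# distribution over support tokens for the entity start and end positions:
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)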
| 77 | 0 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
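# A hypothetical extension (not in the original): collect every stored word under a
# given prefix, reusing the same node walk as find() plus the print_words traversal.
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    found: list[str] = []
    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            found.append(word)
        for key, child in node.nodes.items():
            _collect(child, word + key)
    _collect(curr, prefix)
    return found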
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
    main()
| 123 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 232 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
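# Background note: the _import_structure / _LazyModule pattern above defers importing
# heavy submodules until one of their attributes is first touched. A simplified,
# self-contained sketch of the idea (an illustration, not the actual transformers
# implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(submodule, attr)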
| 357 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants')
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
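# Note on the qkv split above: the original checkpoint stores query/key/value as one
# fused matrix of shape (3*dim, dim), so row slices [:dim], [dim:2*dim] and [-dim:]
# recover the separate query, key and value projections that the HF Swin layers expect.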
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and image processor for {model_name} to hub""")
        model.push_to_hub(f"""microsoft/{model_name}""")
        image_processor.push_to_hub(f"""microsoft/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 48 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer (one token per amino-acid residue)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def _tokenize(self, text, **kwargs):
        return text.split()
    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)
    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}
    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)
    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
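# Usage sketch (vocab path assumed, not from the original): protein sequences are
# tokenized one residue per character, then wrapped in <cls> ... <eos>:
#   tokenizer = EsmTokenizer("vocab.txt")
#   tokenizer("MKTV")["input_ids"]  # -> [cls_id, M_id, K_id, T_id, V_id, eos_id]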
| 149 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return empty function."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
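# Usage sketch (import path assumed): this mirrors how library code is expected to
# consume the helpers above:
#   from datasets.utils import logging as ds_logging
#   ds_logging.set_verbosity_info()
#   logger = ds_logging.get_logger(__name__)
#   logger.info("loading dataset")
#   ds_logging.disable_progress_bar()  # silences the tqdm wrapper defined above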
| 149 | 1 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(n, arr):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
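# For illustration: heaps([1, 2, 3]) yields all 3! = 6 permutations, each produced
# from the previous one by a single swap, e.g. (1, 2, 3), (2, 1, 3), (3, 1, 2), ...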
| 361 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("""boolean value expected""")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f'''{new_prefix}.norm1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[f'''{new_prefix}.conv1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[f'''{new_prefix}.norm2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[f'''{new_prefix}.conv2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[f'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[f'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3, dim=0)
    new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
    new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
    new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["""class_embedding.weight"""] = checkpoint["""label_emb.weight"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    down_block_types = unet_config["""down_block_types"""]
    layers_per_block = unet_config["""layers_per_block"""]
    attention_head_dim = unet_config["""attention_head_dim"""]
    channels_list = unet_config["""block_out_channels"""]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'''down_blocks.{i}.attentions.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f'''down_blocks.{i}.downsamplers.0'''
            old_prefix = f'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = """mid_block.resnets.0"""
    old_prefix = """middle_block.0"""
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = """mid_block.attentions.0"""
    old_prefix = """middle_block.1"""
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = """mid_block.resnets.1"""
    old_prefix = """middle_block.2"""
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["""up_block_types"""]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f'''up_blocks.{i}.attentions.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 129 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the sifted key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name='BB84')
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '0')
    return key
if __name__ == "__main__":
    print(f'''The generated key is : {bb84(8, seed=0)}''')
from doctest import testmod
testmod()
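# For context: only the positions where Alice's and Bob's randomly chosen bases happen
# to agree survive the sifting step, which is why the circuit prepares 6 * key_len
# qubits -- enough margin that the sifted string is almost always at least key_len bits.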
| 27 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 27 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        return (vocab_file,)
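# Usage sketch (vocab path assumed): MGP-STR is a scene-text recognizer, so the
# tokenizer above is purely character-level:
#   tokenizer = MgpstrTokenizer("vocab.json")
#   tokenizer._tokenize("hello")  # -> ["h", "e", "l", "l", "o"]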
| 356 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule='''linear''', beta_start=0.0_0085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type='''epsilon''', thresholding=False, )
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 258 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""")
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""")
        config = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""", subfolder="""feature_extractor""")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="""test-image-processor""")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="""valid_org/test-image-processor-org""")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="""test-dynamic-image-processor""")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("""test-image-processor""", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="""test-image-processor""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="""test-image-processor""", push_to_hub=True, use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("""valid_org/test-image-processor""", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="""valid_org/test-image-processor""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="""valid_org/test-image-processor-org""", push_to_hub=True, use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("""test-dynamic-image-processor""", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""}, )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''', trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, """CustomImageProcessor""")
| 132 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a :Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
a :Tuple = 50_003
a :Optional[int] = 50_002
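# Language-code ids sit at the top of the PLBart "base" vocabulary: the two
# constants above are EN_CODE (50_003) and PYTHON_CODE (50_002), matching the
# fairseq_tokens_to_ids checks in the integration tests below.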
@require_sentencepiece
@require_tokenizers
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Dict = PLBartTokenizer
_SCREAMING_SNAKE_CASE :List[str] = None
_SCREAMING_SNAKE_CASE :Any = False
def _a ( self ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Any = PLBartTokenizer(_a , language_codes="""base""" , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PLBartTokenizer(_a , language_codes="""base""" , keep_accents=_a )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Dict = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
self.assertListEqual(_a , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
SCREAMING_SNAKE_CASE__ : List[str] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = PLBartTokenizer(_a , language_codes="""multi""" , keep_accents=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
self.assertListEqual(
_a , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
SCREAMING_SNAKE_CASE__ : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = """uclanlp/plbart-python-en_XX"""
_SCREAMING_SNAKE_CASE :List[Any] = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
_SCREAMING_SNAKE_CASE :Optional[Any] = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
_SCREAMING_SNAKE_CASE :str = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def _a ( cls ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
SCREAMING_SNAKE_CASE__ : Any = 1
return cls
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50_003 )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _a )
def _a ( self ) -> Dict:
"""simple docstring"""
self.assertIn(_a , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : Tuple = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer.decode(_a , skip_special_tokens=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
self.assertNotIn(self.tokenizer.eos_token , _a )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(_a , max_length=_a , truncation=_a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _a )
self.assertEqual(len(_a ) , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50_004, 50_001] )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Dict = PLBartTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _a )
@require_torch
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
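# The assertions above pin down the MBart-style target layout: labels end with
# [eos, lang_code], and shift_tokens_right rotates the language code from the
# last position to position 0 of decoder_input_ids.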
@require_torch
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_a , truncation=_a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text , padding=_a , truncation=_a , max_length=3 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=_a , truncation=_a , max_length=10 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : List[Any] = targets["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Dict = shift_tokens_right(_a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(_a ) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50_003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50_001,
} , )
| 132 | 1 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=16 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=2 , lowerCamelCase=32 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=30 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = d_model
__a = d_model
__a = decoder_layers
__a = decoder_layers
__a = decoder_ffn_dim
__a = decoder_attention_heads
__a = decoder_attention_heads
__a = eos_token_id
__a = bos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = use_cache
__a = max_position_embeddings
__a = None
__a = decoder_seq_length
__a = 2
__a = 1
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__a = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = True
__a = TrOCRDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
__a = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 )
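# In short: decoding one new token with past_key_values must reproduce the
# same logits as a full forward pass over the concatenated sequence
# (atol=1e-3), which is the standard KV-cache correctness check.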
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_snake_case : Optional[int] = (TrOCRForCausalLM,) if is_torch_available() else ()
_snake_case : str = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
_snake_case : Optional[Any] = True
_snake_case : Any = False
def a__ ( self ):
__a = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase )
__a = ConfigTester(self , config_class=lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase )
def a__ ( self ):
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a__ ( self ):
pass
| 268 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:int = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : List[Any] = """focalnet"""
def __init__( self , lowerCamelCase=224 , lowerCamelCase=4 , lowerCamelCase=3 , lowerCamelCase=96 , lowerCamelCase=False , lowerCamelCase=[192, 384, 768, 768] , lowerCamelCase=[2, 2, 6, 2] , lowerCamelCase=[2, 2, 2, 2] , lowerCamelCase=[3, 3, 3, 3] , lowerCamelCase="gelu" , lowerCamelCase=4.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=False , lowerCamelCase=1E-4 , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=0.02 , lowerCamelCase=1E-5 , lowerCamelCase=32 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = use_conv_embed
__a = hidden_sizes
__a = depths
__a = focal_levels
__a = focal_windows
__a = hidden_act
__a = mlp_ratio
__a = hidden_dropout_prob
__a = drop_path_rate
__a = use_layerscale
__a = layerscale_value
__a = use_post_layernorm
__a = use_post_layernorm_in_modulation
__a = normalize_modulator
__a = initializer_range
__a = layer_norm_eps
__a = encoder_stride
__a = ["stem"] + [F"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
| 268 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : str = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
__snake_case : List[Any] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
__snake_case : str = {
"""allenai/longformer-base-4096""": 40_96,
"""allenai/longformer-large-4096""": 40_96,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 40_96,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 40_96,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : Optional[Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
a_ : int = bs[:]
a_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(a__)
cs.append(2**8 + n)
n += 1
a_ : List[Any] = [chr(a__) for n in cs]
return dict(zip(a__ , a__))
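# The helper above builds a bijective byte<->unicode table: printable bytes
# map to themselves, the remaining bytes to code points from 256 up. For
# example the space byte 0x20 maps to '\u0120' ('Ġ'), which is why byte-level
# BPE vocabularies show 'Ġ' where a leading space was.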
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : List[str] = set()
a_ : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
a_ : Dict = char
return pairs
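# e.g. for the symbol tuple ('l', 'o', 'w') this yields {('l', 'o'), ('o', 'w')}.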
class A__(a_ ):
"""simple docstring"""
_A : Tuple = VOCAB_FILES_NAMES
_A : List[str] = PRETRAINED_VOCAB_FILES_MAP
_A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase , _lowercase , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , **_lowercase , ) -> Union[str, Any]:
a_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
a_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
a_ : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
a_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
a_ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
a_ : Optional[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a_ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
a_ : Tuple = json.load(_lowercase )
a_ : str = {v: k for k, v in self.encoder.items()}
a_ : Union[str, Any] = errors # how to handle errors in decoding
a_ : Tuple = bytes_to_unicode()
a_ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
a_ : List[str] = merges_handle.read().split("""\n""" )[1:-1]
a_ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
a_ : Optional[Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
a_ : str = {}
a_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a_ : Optional[int] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def UpperCamelCase__ ( self ) -> Dict:
return len(self.encoder )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , _lowercase ) -> int:
if token in self.cache:
return self.cache[token]
a_ : Union[str, Any] = tuple(_lowercase )
a_ : Optional[int] = get_pairs(_lowercase )
if not pairs:
return token
while True:
a_ : Any = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
a_ , a_ : Optional[int] = bigram
a_ : Optional[Any] = []
a_ : Optional[Any] = 0
while i < len(_lowercase ):
try:
a_ : Any = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a_ : Any = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a_ : str = tuple(_lowercase )
a_ : List[str] = new_word
if len(_lowercase ) == 1:
break
else:
a_ : Tuple = get_pairs(_lowercase )
a_ : List[Any] = """ """.join(_lowercase )
a_ : List[str] = word
return word
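# Worked example of the merge loop above, assuming hypothetical merges
# ('l', 'o') then ('lo', 'w'): 'low' -> ('l', 'o', 'w') -> ('lo', 'w') -> ('low',).
# Each pass merges the lowest-ranked (highest-priority) pair until no ranked
# pair remains; the result is cached per token.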
def UpperCamelCase__ ( self , _lowercase ) -> List[str]:
a_ : Optional[int] = []
for token in re.findall(self.pat , _lowercase ):
a_ : Union[str, Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self , _lowercase ) -> int:
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self , _lowercase ) -> Any:
return self.decoder.get(_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Tuple:
a_ : Dict = """""".join(_lowercase )
a_ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a_ : List[str] = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
a_ : Optional[Any] = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
a_ : Tuple = 0
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
a_ : List[str] = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
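# This is the RoBERTa/Longformer pair format <s> A </s></s> B </s>, hence the
# doubled separator between the two sequences.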
def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
a_ : Any = [self.sep_token_id]
a_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> Union[str, Any]:
a_ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
a_ : str = """ """ + text
return (text, kwargs)
| 248 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A__(unittest.TestCase ):
"""simple docstring"""
_A : List[str] = StableDiffusionLDMaDPipeline
_A : int = TEXT_TO_IMAGE_PARAMS
_A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_A : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a_ : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
a_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ : Tuple = CLIPTextModel(_lowercase )
a_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Any:
if str(_lowercase ).startswith("""mps""" ):
a_ : Optional[Any] = torch.manual_seed(_lowercase )
else:
a_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Any = self.get_dummy_components()
a_ : List[str] = StableDiffusionLDMaDPipeline(**_lowercase )
a_ : Union[str, Any] = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : int = self.get_dummy_inputs(_lowercase )
a_ : List[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : Tuple = output.rgb, output.depth
a_ : Union[str, Any] = rgb[0, -3:, -3:, -1]
a_ : Any = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a_ : Optional[Any] = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
a_ : int = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = self.get_dummy_components()
a_ : Optional[int] = StableDiffusionLDMaDPipeline(**_lowercase )
a_ : Optional[Any] = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Dict = self.get_dummy_inputs(_lowercase )
a_ : List[str] = 3 * [inputs["""prompt"""]]
# forward
a_ : Optional[int] = ldmad_pipe(**_lowercase )
a_ , a_ : Any = output.rgb, output.depth
a_ : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
a_ : Union[str, Any] = depth_slice_a[0, -3:, -1]
a_ : Dict = self.get_dummy_inputs(_lowercase )
a_ : List[str] = 3 * [inputs.pop("""prompt""" )]
a_ : List[Any] = ldmad_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
a_ : int = text_inputs["""input_ids"""].to(_lowercase )
a_ : Any = ldmad_pipe.text_encoder(_lowercase )[0]
a_ : Dict = prompt_embeds
# forward
a_ : int = ldmad_pipe(**_lowercase )
a_ , a_ : Optional[int] = output.rgb, output.depth
a_ : List[str] = rgb_slice_a[0, -3:, -3:, -1]
a_ : Tuple = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def UpperCamelCase__ ( self ) -> Dict:
a_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : Any = PNDMScheduler(skip_prk_steps=_lowercase )
a_ : List[str] = StableDiffusionLDMaDPipeline(**_lowercase )
a_ : str = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[Any] = self.get_dummy_inputs(_lowercase )
a_ : int = """french fries"""
a_ : Any = ldmad_pipe(**_lowercase , negative_prompt=_lowercase )
a_ , a_ : Optional[Any] = output.rgb, output.depth
a_ : Tuple = rgb[0, -3:, -3:, -1]
a_ : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a_ : Optional[int] = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
a_ : Union[str, Any] = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> List[str]:
a_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Dict = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
a_ : Tuple = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
a_ : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Any:
a_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
a_ : str = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Dict = self.get_inputs(_lowercase )
a_ : Optional[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : int = output.rgb, output.depth
a_ : str = rgb[0, -3:, -3:, -1].flatten()
a_ : Tuple = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
a_ : Optional[int] = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
a_ : Optional[int] = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> str:
a_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Tuple = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
a_ : Any = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
a_ : Dict = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[str] = self.get_inputs(_lowercase )
a_ : Union[str, Any] = ldmad_pipe(**_lowercase )
a_ , a_ : str = output.rgb, output.depth
a_ : List[str] = 0.4_9_5_5_8_6
a_ : int = 0.3_3_7_9_5_5_1_5
a_ : int = 1_1_2.4_8_5_1_8
a_ : Optional[int] = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[str] = self.get_inputs(_lowercase )
a_ : List[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : List[Any] = output.rgb, output.depth
a_ : int = 0.4_1_9_4_1_2_7
a_ : List[str] = 0.3_5_3_7_5_5_8_6
a_ : Optional[int] = 0.5_6_3_8_5_0_2
a_ : str = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 248 | 1 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
UpperCamelCase__ : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
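# For example 0.5 * 10 * 11 = 55 is the 10th triangular number, and 55 is the
# word value of "SKY" (19 + 11 + 25), so "SKY" is a triangular word.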
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : int = os.path.dirname(os.path.realpath(__file__ ) )
A_ : Dict = os.path.join(a_ , """words.txt""" )
A_ : str = """"""
with open(a_ ) as f:
A_ : str = f.readline()
A_ : Optional[Any] = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
A_ : str = [
word
for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(a_ )
if __name__ == "__main__":
print(solution())
| 352 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = {'vocab_file': 'spm_char.model'}
UpperCamelCase__ : Optional[Any] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
UpperCamelCase__ : Union[str, Any] = {
'microsoft/speecht5_asr': 1_024,
'microsoft/speecht5_tts': 1_024,
'microsoft/speecht5_vc': 1_024,
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase = None , **_lowerCamelCase , ) -> None:
A_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : List[Any] = vocab_file
A_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def UpperCAmelCase_ ( self ) -> Any:
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self ) -> int:
A_ : Dict = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
A_ : Optional[int] = self.__dict__.copy()
A_ : str = None
return state
def __setstate__( self , _lowerCamelCase ) -> List[str]:
A_ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ : Union[str, Any] = {}
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
return self.sp_model.piece_to_id(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
A_ : Dict = self.sp_model.IdToPiece(_lowerCamelCase )
return token
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = []
A_ : Union[str, Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
A_ : Optional[int] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = [1]
if token_ids_a is None:
return ([0] * len(_lowerCamelCase )) + suffix_ones
return ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A_ : Optional[int] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
| 164 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[Any] = """vivit"""
def __init__( self , __UpperCAmelCase=2_2_4 , __UpperCAmelCase=3_2 , __UpperCAmelCase=[2, 1_6, 1_6] , __UpperCAmelCase=3 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu_fast" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-06 , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = hidden_size
lowerCAmelCase__ :Optional[Any] = num_hidden_layers
lowerCAmelCase__ :Dict = num_attention_heads
lowerCAmelCase__ :Optional[Any] = intermediate_size
lowerCAmelCase__ :Optional[Any] = hidden_act
lowerCAmelCase__ :List[Any] = hidden_dropout_prob
lowerCAmelCase__ :int = attention_probs_dropout_prob
lowerCAmelCase__ :List[str] = initializer_range
lowerCAmelCase__ :List[str] = layer_norm_eps
lowerCAmelCase__ :List[str] = image_size
lowerCAmelCase__ :Union[str, Any] = num_frames
lowerCAmelCase__ :Optional[Any] = tubelet_size
lowerCAmelCase__ :Union[str, Any] = num_channels
lowerCAmelCase__ :int = qkv_bias
super().__init__(**__UpperCAmelCase )
| 293 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(a )
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__UpperCAmelCase )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = {}
lowerCAmelCase__ :Tuple = {}
lowerCAmelCase__ :Any = {}
# preprocess args
if "points_per_batch" in kwargs:
lowerCAmelCase__ :Dict = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
lowerCAmelCase__ :Union[str, Any] = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
lowerCAmelCase__ :Any = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
lowerCAmelCase__ :Any = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
lowerCAmelCase__ :Dict = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCAmelCase__ :Tuple = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
lowerCAmelCase__ :Optional[int] = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
lowerCAmelCase__ :List[Any] = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
lowerCAmelCase__ :Optional[Any] = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
lowerCAmelCase__ :int = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
lowerCAmelCase__ :Union[str, Any] = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
lowerCAmelCase__ :Optional[Any] = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=6_4 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_1_2 / 1_5_0_0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 1 , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = load_image(__UpperCAmelCase )
lowerCAmelCase__ :int = self.image_processor.size['longest_edge']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self.image_processor.generate_crop_boxes(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
lowerCAmelCase__ :Optional[int] = self.get_inference_context()
with inference_context():
lowerCAmelCase__ :Any = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device )
lowerCAmelCase__ :Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
lowerCAmelCase__ :Optional[int] = image_embeddings
lowerCAmelCase__ :List[Any] = grid_points.shape[1]
lowerCAmelCase__ :Union[str, Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = grid_points[:, i : i + points_per_batch, :, :]
lowerCAmelCase__ :List[str] = input_labels[:, i : i + points_per_batch]
lowerCAmelCase__ :List[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
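# Each yielded chunk reuses the image embeddings computed once above and
# carries at most points_per_batch prompt points, so the mask decoder runs in
# bounded batches regardless of how many points the crops generate.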
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ):
'''simple docstring'''
lowerCAmelCase__ :Any = model_inputs.pop('input_boxes' )
lowerCAmelCase__ :Optional[int] = model_inputs.pop('is_last' )
lowerCAmelCase__ :Dict = model_inputs.pop('original_sizes' ).tolist()
lowerCAmelCase__ :Dict = model_inputs.pop('reshaped_input_sizes' ).tolist()
lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCAmelCase__ :int = model_outputs['pred_masks']
lowerCAmelCase__ :Optional[Any] = self.image_processor.post_process_masks(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase )
lowerCAmelCase__ :Any = model_outputs['iou_scores']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :Optional[Any] = []
lowerCAmelCase__ :int = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.image_processor.post_process_for_mask_generation(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = defaultdict(__UpperCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = {}
if output_rle_mask:
lowerCAmelCase__ :str = rle_mask
if output_bboxes_mask:
lowerCAmelCase__ :Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 293 | 1 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
A__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def SCREAMING_SNAKE_CASE ( lowercase_ = 100 ) -> int:
"""simple docstring"""
A__ = 1
A__ = 2
for i in range(2 , max_n + 1 ):
A__ = pre_numerator
A__ = 2 * i // 3 if i % 3 == 0 else 1
A__ = cur_numerator
A__ = e_cont * pre_numerator + temp
return sum_digits(cur_numerator )
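# Sanity check for the recurrence above: for max_n = 10 the numerator of the
# 10th convergent of e is 1457, and 1 + 4 + 5 + 7 = 17, so the function
# returns 17 in that case.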
if __name__ == "__main__":
print(F'''{solution() = }''')
| 231 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=14 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=99 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = rotary_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = None
A__ = vocab_size - 1
A__ = vocab_size - 1
A__ = vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Optional[int]:
'''simple docstring'''
A__ = 20
A__ = model_class_name(UpperCAmelCase__)
A__ = model.init_cache(input_ids.shape[0] , UpperCAmelCase__)
A__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''')
A__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
A__ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
A__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''')
A__ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase__ , )
A__ = model(UpperCAmelCase__)
A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
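# The check above validates Flax cache handling: decoding the final token
# against the init_cache'd past must match the last position of a full
# forward pass to within 1e-3.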
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->Any:
'''simple docstring'''
A__ = 20
A__ = model_class_name(UpperCAmelCase__)
A__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
A__ = model.init_cache(input_ids.shape[0] , UpperCAmelCase__)
A__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
A__ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
A__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''')
A__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__)
A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
@require_flax
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
UpperCAmelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ = FlaxGPTJModelTester(self)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
@tooslow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''')
A__ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)
A__ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''')
A__ = False
A__ = model.config.eos_token_id
A__ = jax.jit(model.generate)
A__ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id).sequences
A__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
A__ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCAmelCase__ , UpperCAmelCase__)
A__ , A__ = pt_inputs['''input_ids'''].shape
A__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase__):
A__ = 0
A__ = 1
A__ = 0
A__ = 1
A__ = pt_model_class(UpperCAmelCase__).eval()
A__ = model_class(UpperCAmelCase__ , dtype=jnp.floataa)
A__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase__)
A__ = fx_state
with torch.no_grad():
A__ = pt_model(**UpperCAmelCase__).to_tuple()
A__ = fx_model(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase__)
A__ = model_class.from_pretrained(UpperCAmelCase__ , from_pt=UpperCAmelCase__)
A__ = fx_model_loaded(**UpperCAmelCase__).to_tuple()
self.assertEqual(
len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output_loaded, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCAmelCase__ , UpperCAmelCase__)
A__ = pt_model_class(UpperCAmelCase__).eval()
A__ = model_class(UpperCAmelCase__ , dtype=jnp.floataa)
A__ = load_flax_weights_in_pytorch_model(UpperCAmelCase__ , fx_model.params)
A__ , A__ = pt_inputs['''input_ids'''].shape
A__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase__):
A__ = 0
A__ = 1
A__ = 0
A__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A__ = pt_model(**UpperCAmelCase__).to_tuple()
A__ = fx_model(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase__)
A__ = pt_model_class.from_pretrained(UpperCAmelCase__ , from_flax=UpperCAmelCase__)
with torch.no_grad():
A__ = pt_model_loaded(**UpperCAmelCase__).to_tuple()
self.assertEqual(
len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@tooslow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''')
A__ = model(np.ones((1, 1)))
self.assertIsNotNone(UpperCAmelCase__)
| 231 | 1 |
def partition(m: int) -> int:
    """Count the integer partitions of m, where memo[n][k] holds the number of
    partitions of n into parts of size at most k + 1."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
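# Editor's self-check (illustrative, not part of the original module): with the
# recurrence above, partition(m) counts every integer partition of m; partition(5)
# is 7 (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).
if __name__ == "__main__":
    assert partition(1) == 1
    assert partition(2) == 2
    assert partition(5) == 7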
| 29 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
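# Illustrative usage (editor's sketch, not part of the original module): aligning
# the template with a dataset's features swaps the bare ClassLabel placeholder in
# `label_schema` for the dataset's concrete ClassLabel. The label names are made up.
if __name__ == "__main__":
    from datasets import ClassLabel, Features, Value
    from datasets.tasks import TextClassification

    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    template = TextClassification(text_column="text", label_column="labels")
    print(template.align_with_features(features).label_schema["labels"].names)  # ['neg', 'pos']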
| 77 | 0 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursive top-down approach: explore every cell and track the best square
    seen so far in largest_square_area[0]."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in dp_array so each cell is solved only once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up dynamic programming over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up variant that keeps only two rows of the table at a time."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy rather than alias: the next pass overwrites current_row in place,
        # and an alias would clobber the snapshot of the row below
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
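# Editor's cross-check (illustrative, not part of the original module): all four
# variants agree on a matrix whose largest all-ones square has side 2.
if __name__ == "__main__":
    _mat = [[1, 1, 0], [1, 1, 0], [0, 0, 1]]
    assert {
        largest_square_area_in_matrix_top_down_approach(3, 3, _mat),
        largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, _mat),
        largest_square_area_in_matrix_bottom_up(3, 3, _mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _mat),
    } == {2}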
| 250 |
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 250 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 94 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
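# Editor's note (illustrative): the two regexes above drive the whole script.
# `_re_intro_mapping` recognises the line that opens a mapping, and
# `_re_identifier` extracts the model identifier that the entries are sorted by:
#
#     >>> bool(_re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict("))
#     True
#     >>> _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0]
#     'albert'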
| 48 | 0 |
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disable gradients for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
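# Illustrative usage (editor's sketch; the tiny linear layer is made up):
if __name__ == "__main__":
    import torch.nn as nn

    net = nn.Linear(4, 2)
    freeze_module(net)  # no parameter of `net` will receive gradients now
    print(all(not p.requires_grad for p in net.parameters()))  # True
    print(get_device(), get_timestamp())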
| 358 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number for n >= 2 (0 for n == 1 or non-int input)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
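# Editor's check (illustrative, not part of the original module): F(12) = 144 is
# the first Fibonacci number with three digits, so fibonacci_digits_index(3) is 12.
if __name__ == "__main__":
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(3) == 12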
| 15 | 0 |
"""simple docstring"""
import random
class __A :
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : str ) ->tuple[list[int], list[int]]:
"""simple docstring"""
snake_case_ = [ord(UpperCAmelCase_ ) for i in text]
snake_case_ = []
snake_case_ = []
for i in plain:
snake_case_ = random.randint(1 , 300 )
snake_case_ = (i + k) * k
cipher.append(UpperCAmelCase_ )
key.append(UpperCAmelCase_ )
return cipher, key
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] ) ->str:
"""simple docstring"""
snake_case_ = []
for i in range(len(UpperCAmelCase_ ) ):
snake_case_ = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(UpperCAmelCase_ ) )
return "".join(UpperCAmelCase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
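# Editor's note (illustrative): decryption is exact because
# c = (p + k) * k = p*k + k^2, hence (c - k^2) / k = p with no remainder, so the
# round trip below always recovers the input.
if __name__ == "__main__":
    cipher, key = Onepad.encrypt("attack at dawn")
    assert Onepad.decrypt(cipher, key) == "attack at dawn"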
| 347 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int:
snake_case_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
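# Editor's check (illustrative, not part of the original module):
# solution(8) = phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, the number
# of reduced proper fractions with denominator <= 8.
if __name__ == "__main__":
    assert solution(8) == 21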
| 347 | 1 |
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of a number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
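# Editor's check (illustrative, not part of the original module):
# 10! = 3628800, whose digits sum to 27.
if __name__ == "__main__":
    assert factorial(10) == 3628800
    assert solution(10) == 27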
| 371 |
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 206 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__snake_case : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__snake_case : List[str] = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__snake_case : List[Any] = {
"unc-nlp/lxmert-base-uncased": 512,
}
__snake_case : List[str] = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = LxmertTokenizer
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Optional[Any]="[UNK]" , _SCREAMING_SNAKE_CASE: Any="[SEP]" , _SCREAMING_SNAKE_CASE: Dict="[PAD]" , _SCREAMING_SNAKE_CASE: int="[CLS]" , _SCREAMING_SNAKE_CASE: Optional[Any]="[MASK]" , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , _SCREAMING_SNAKE_CASE) != do_lower_case
or normalizer_state.get("strip_accents" , _SCREAMING_SNAKE_CASE) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _SCREAMING_SNAKE_CASE) != tokenize_chinese_chars
):
__lowerCAmelCase : List[Any] = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("type"))
__lowerCAmelCase : Optional[int] = do_lower_case
__lowerCAmelCase : Optional[Any] = strip_accents
__lowerCAmelCase : List[Any] = tokenize_chinese_chars
__lowerCAmelCase : Any = normalizer_class(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = do_lower_case
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Dict=None) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
__lowerCAmelCase : str = [self.sep_token_id]
__lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE)
return tuple(_SCREAMING_SNAKE_CASE) | 269 |
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
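# Editor's check (illustrative, not part of the original module):
# C(5, 2) = 5! / (2! * 3!) = 10, and the poker-hand count printed above is
# C(52, 5) = 2598960.
if __name__ == "__main__":
    assert combinations(5, 2) == 10
    assert combinations(52, 5) == 2598960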
| 125 | 0 |
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer with NMT/NFKC normalization,
    whitespace collapsing and lowercasing."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model on the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model on the given iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # patch the serialized model so the unknown token resolves to the reserved id
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
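# Illustrative usage (editor's sketch): training on a tiny in-memory corpus.
# The corpus and vocab_size are made up; a realistic run would stream a large
# text file through train() or train_from_iterator().
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    corpus = ["hello world", "hello tokenizers", "unigram models are trained on text"] * 100
    tokenizer.train_from_iterator(corpus, vocab_size=60, show_progress=False)
    print(tokenizer.encode("hello world").tokens)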
| 358 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( snake_case__ ,snake_case__="shi-labs/oneformer_demo" ) -> Union[str, Any]:
"""simple docstring"""
with open(hf_hub_download(snake_case__ ,snake_case__ ,repo_type="""dataset""" ) ,"""r""" ) as f:
_SCREAMING_SNAKE_CASE = json.load(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for key, info in class_info.items():
_SCREAMING_SNAKE_CASE = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(snake_case__ ) )
_SCREAMING_SNAKE_CASE = thing_ids
_SCREAMING_SNAKE_CASE = class_names
return metadata
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any]=7 , UpperCAmelCase_: Union[str, Any]=3 , UpperCAmelCase_: Optional[int]=30 , UpperCAmelCase_: List[str]=400 , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_: int=[0.5, 0.5, 0.5] , UpperCAmelCase_: List[str]=10 , UpperCAmelCase_: Optional[int]=False , UpperCAmelCase_: Optional[int]=255 , UpperCAmelCase_: Tuple="shi-labs/oneformer_demo" , UpperCAmelCase_: Union[str, Any]="ade20k_panoptic.json" , UpperCAmelCase_: Union[str, Any]=10 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = class_info_file
_SCREAMING_SNAKE_CASE = prepare_metadata(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = num_text
_SCREAMING_SNAKE_CASE = repo_path
# for the post_process_functions
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 10
_SCREAMING_SNAKE_CASE = 10
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = do_reduce_labels
_SCREAMING_SNAKE_CASE = ignore_index
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase ( self: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: List[str]=False ):
'''simple docstring'''
if not batched:
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w )
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
elif w > h:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h )
else:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__snake_case : int = image_processing_class
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """ignore_index""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """class_info_file""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """num_text""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """repo_path""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """metadata""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_reduce_labels""" ) )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Any=False , UpperCAmelCase_: str="np" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_SCREAMING_SNAKE_CASE = self.image_processing_tester.num_labels
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ )
if with_segmentation_maps:
_SCREAMING_SNAKE_CASE = num_labels
if is_instance_map:
_SCREAMING_SNAKE_CASE = list(range(UpperCAmelCase_ ) ) * 2
_SCREAMING_SNAKE_CASE = dict(enumerate(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_SCREAMING_SNAKE_CASE = [Image.fromarray(UpperCAmelCase_ ) for annotation in annotations]
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , UpperCAmelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=UpperCAmelCase_ , pad_and_return_pixel_mask=UpperCAmelCase_ , )
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Any ):
'''simple docstring'''
def common(UpperCAmelCase_: List[str]=False , UpperCAmelCase_: Optional[int]=None ):
_SCREAMING_SNAKE_CASE = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCAmelCase_ , is_instance_map=UpperCAmelCase_ , segmentation_type=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = inputs["""mask_labels"""]
_SCREAMING_SNAKE_CASE = inputs["""class_labels"""]
_SCREAMING_SNAKE_CASE = inputs["""pixel_values"""]
_SCREAMING_SNAKE_CASE = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCAmelCase_ )
common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" )
common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.zeros((20, 50) )
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = binary_mask_to_rle(UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_SCREAMING_SNAKE_CASE = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(UpperCAmelCase_ , target_sizes=UpperCAmelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = image_processor.post_process_instance_segmentation(UpperCAmelCase_ , threshold=0 )
self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = image_processor.post_process_panoptic_segmentation(UpperCAmelCase_ , threshold=0 )
self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 125 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
snake_case : int = ""
snake_case : Optional[int] = ""
snake_case : List[str] = ""
snake_case : int = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
__magic_name__ , __magic_name__ : List[str] = get_dataset(_snake_case , _snake_case )
print("Processing..." )
__magic_name__ , __magic_name__ , __magic_name__ : Dict = update_image_and_anno(_snake_case , _snake_case , _snake_case )
for index, image in enumerate(_snake_case ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__magic_name__ : Any = random_chars(32 )
__magic_name__ : Any = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
__magic_name__ : Tuple = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''{file_root}.jpg''' , _snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(_snake_case )} with {file_name}''' )
__magic_name__ : List[str] = []
for anno in new_annos[index]:
__magic_name__ : Optional[int] = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(_snake_case )
        with open(F'''{file_root}.txt''' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowerCAmelCase_ ( _snake_case : str , _snake_case : str ) -> tuple[list, list]:
'''simple docstring'''
__magic_name__ : Tuple = []
__magic_name__ : str = []
for label_file in glob.glob(os.path.join(_snake_case , "*.txt" ) ):
__magic_name__ : List[str] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(_snake_case ) as in_file:
__magic_name__ : List[Any] = in_file.readlines()
__magic_name__ : Tuple = os.path.join(_snake_case , F'''{label_name}.jpg''' )
__magic_name__ : int = []
for obj_list in obj_lists:
__magic_name__ : Optional[Any] = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_snake_case )
labels.append(_snake_case )
return img_paths, labels
def lowerCAmelCase_ ( _snake_case : list , _snake_case : list , _snake_case : int = 1 ) -> tuple[list, list, list]:
'''simple docstring'''
__magic_name__ : Union[str, Any] = []
__magic_name__ : Tuple = []
__magic_name__ : int = []
for idx in range(len(_snake_case ) ):
__magic_name__ : Tuple = []
__magic_name__ : List[Any] = img_list[idx]
path_list.append(_snake_case )
__magic_name__ : int = anno_list[idx]
__magic_name__ : Optional[int] = cva.imread(_snake_case )
if flip_type == 1:
__magic_name__ : List[Any] = cva.flip(_snake_case , _snake_case )
for bbox in img_annos:
__magic_name__ : Optional[int] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__magic_name__ : Union[str, Any] = cva.flip(_snake_case , _snake_case )
for bbox in img_annos:
__magic_name__ : Tuple = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_snake_case )
new_imgs_list.append(_snake_case )
return new_imgs_list, new_annos_lists, path_list
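# Worked example of the flip arithmetic in update_image_and_anno above
# (illustrative numbers, not from the original file): YOLO-style annotations
# store (label, x_center, y_center, width, height) normalised to [0, 1], so a
# horizontal flip only mirrors the x centre, x_center_new = 1 - x_center
# (a box centred at 0.25 moves to 0.75), and a vertical flip mirrors the
# y centre the same way; widths and heights are unchanged.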
def lowerCAmelCase_ ( _snake_case : int = 32 ) -> str:
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
__magic_name__ : List[str] = ascii_lowercase + digits
return "".join(random.choice(_snake_case ) for _ in range(_snake_case ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 281 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( _snake_case : List[Any] ) -> List[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Tuple:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Dict = "mock-s3-bucket"
__magic_name__ : Any = F'''s3://{mock_bucket}'''
__magic_name__ : str = extract_path_from_uri(_snake_case )
assert dataset_path.startswith("s3://" ) is False
__magic_name__ : Tuple = "./local/path"
__magic_name__ : Optional[Any] = extract_path_from_uri(_snake_case )
assert dataset_path == new_dataset_path
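# Rough sketch of the behaviour the asserts above pin down (an assumption about
# intent, not the actual datasets implementation): remote URIs lose their
# scheme prefix while local paths pass through unchanged, e.g.
#   extract_path_from_uri("s3://mock-s3-bucket")  -> "mock-s3-bucket"
#   extract_path_from_uri("./local/path")         -> "./local/path"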
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : str = is_remote_filesystem(_snake_case )
assert is_remote is True
__magic_name__ : Optional[int] = fsspec.filesystem("file" )
__magic_name__ : int = is_remote_filesystem(_snake_case )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _snake_case )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any ) -> int:
'''simple docstring'''
__magic_name__ : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
__magic_name__ : str = input_paths[compression_fs_class.protocol]
if input_path is None:
__magic_name__ : Dict = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_snake_case )
__magic_name__ : str = fsspec.filesystem(compression_fs_class.protocol , fo=_snake_case )
assert isinstance(_snake_case , _snake_case )
__magic_name__ : int = os.path.basename(_snake_case )
__magic_name__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(_snake_case , "r" , encoding="utf-8" ) as f, open(_snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
__magic_name__ : int = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
__magic_name__ : int = compressed_file_paths[protocol]
__magic_name__ : Tuple = "dataset.jsonl"
__magic_name__ : List[str] = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
__magic_name__ , *__magic_name__ : Optional[Any] = fsspec.get_fs_token_paths(_snake_case )
assert fs.isfile(_snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Tuple ) -> str:
'''simple docstring'''
__magic_name__ : int = hf_api.dataset_info(_snake_case , token=_snake_case )
__magic_name__ : Optional[Any] = HfFileSystem(repo_info=_snake_case , token=_snake_case )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(_snake_case ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def lowerCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Optional[Any] = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_snake_case , _snake_case , clobber=_snake_case )
with pytest.warns(_snake_case ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_snake_case ) == 1
assert (
str(warning_info[0].message )
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 281 | 1 |
'''simple docstring'''
import random
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : float , __A : bool = False ) -> dict:
_SCREAMING_SNAKE_CASE = {i: [] for i in range(__A )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(__A )
    # if probability is lower than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from u to v
    # if the randomly generated number is less than the given probability
for i in range(__A ):
for j in range(i + 1 , __A ):
if random.random() < probability:
graph[i].append(__A )
if not directed:
                    # if the graph is undirected, also add an edge from j to i
graph[j].append(__A )
return graph
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> dict:
return {
i: [j for j in range(__A ) if i != j] for i in range(__A )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 |
'''simple docstring'''
import random
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : float , __A : bool = False ) -> dict:
_SCREAMING_SNAKE_CASE = {i: [] for i in range(__A )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(__A )
    # if probability is lower than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from u to v
    # if the randomly generated number is less than the given probability
for i in range(__A ):
for j in range(i + 1 , __A ):
if random.random() < probability:
graph[i].append(__A )
if not directed:
                    # if the graph is undirected, also add an edge from j to i
graph[j].append(__A )
return graph
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> dict:
return {
i: [j for j in range(__A ) if i != j] for i in range(__A )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
_lowerCAmelCase = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
_lowerCAmelCase = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class A ( lowerCAmelCase_ ):
'''simple docstring'''
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
A = BartTokenizer
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> int:
super().__init__(
_snake_case , _snake_case , tokenizer_file=_snake_case , errors=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case , **_snake_case , )
__UpperCamelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _snake_case ) != add_prefix_space:
__UpperCamelCase : Tuple = getattr(_snake_case , pre_tok_state.pop("type" ) )
__UpperCamelCase : List[Any] = add_prefix_space
__UpperCamelCase : str = pre_tok_class(**_snake_case )
__UpperCamelCase : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCamelCase : Any = 'post_processor'
__UpperCamelCase : List[Any] = getattr(self.backend_tokenizer , _snake_case , _snake_case )
if tokenizer_component_instance:
__UpperCamelCase : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
__UpperCamelCase : Tuple = tuple(state["sep"] )
if "cls" in state:
__UpperCamelCase : Optional[Any] = tuple(state["cls"] )
__UpperCamelCase : int = False
if state.get("add_prefix_space" , _snake_case ) != add_prefix_space:
__UpperCamelCase : int = add_prefix_space
__UpperCamelCase : Tuple = True
if state.get("trim_offsets" , _snake_case ) != trim_offsets:
__UpperCamelCase : Optional[Any] = trim_offsets
__UpperCamelCase : Tuple = True
if changes_to_apply:
__UpperCamelCase : Union[str, Any] = getattr(_snake_case , state.pop("type" ) )
__UpperCamelCase : List[str] = component_class(**_snake_case )
setattr(self.backend_tokenizer , _snake_case , _snake_case )
@property
def a_ (self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : List[str] = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else value
__UpperCamelCase : Union[str, Any] = value
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> BatchEncoding:
__UpperCamelCase : str = kwargs.get("is_split_into_words" , _snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*_snake_case , **_snake_case )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> BatchEncoding:
__UpperCamelCase : List[str] = kwargs.get("is_split_into_words" , _snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*_snake_case , **_snake_case )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
__UpperCamelCase : Dict = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Dict:
__UpperCamelCase : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
__UpperCamelCase : Any = [self.sep_token_id]
__UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
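# Illustration of the special-token layout built above (the standard BART
# convention, stated as context rather than taken from this file):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s> </s> B </s>
# and the token type ids returned above are all zeros in both cases.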
| 298 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Optional[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :int = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[str] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a_ :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
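# Context sketch (illustrative, not the actual _LazyModule): the pattern above
# keeps the top-level import cheap by only recording names per submodule and
# importing a submodule the first time one of its attributes is accessed; the
# try/except blocks above downgrade missing optional backends (sentencepiece,
# tokenizers, torch) to simply omitting those names.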
| 277 | 0 |
import numpy
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : numpy.ndarray , lowercase_ : numpy.ndarray):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4)
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = numpy.random.rand(
4 , 3)
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE_ : List[Any] = numpy.random.rand(3 , 1)
# Real output values provided.
SCREAMING_SNAKE_CASE_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE_ : Dict = numpy.zeros(output_array.shape)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights))
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return self.layer_between_second_hidden_layer_and_output
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , )
SCREAMING_SNAKE_CASE_ : Tuple = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , )
SCREAMING_SNAKE_CASE_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : numpy.ndarray , lowercase_ : int , lowercase_ : bool):
'''simple docstring'''
for iteration in range(1 , iterations + 1):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE_ : str = numpy.mean(numpy.square(output - self.feedforward()))
print(F'Iteration {iteration} Loss: {loss}')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : numpy.ndarray):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = input_arr
SCREAMING_SNAKE_CASE_ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
SCREAMING_SNAKE_CASE_ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def _A (__a ) -> numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def _A (__a ) -> numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
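# Quick sanity check of the two helpers above (illustrative, not part of the
# original file): sigmoid(0) = 1 / (1 + e**0) = 0.5, and because the derivative
# here is written in terms of the sigmoid's output rather than its input,
# sigmoid_derivative(0.5) = 0.5 * (1 - 0.5) = 0.25.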
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE_ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=__a , output_array=__a )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__a , iterations=10 , give_loss=__a )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 360 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _A (__a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def _A (__a ) -> np.ndarray:
"""simple docstring"""
return (gray > 1_27) & (gray <= 2_55)
def _A (__a , __a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 )
return output
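# Worked example of the dilation above (illustrative, not from the original
# file): with the cross-shaped structuring element defined below, a single
# seed pixel grows into a plus shape, since every position whose neighbourhood
# overlaps the seed produces a sum greater than zero:
#   [[0, 0, 0],      [[0, 1, 0],
#    [0, 1, 0],  ->   [1, 1, 1],
#    [0, 0, 0]]       [0, 1, 0]]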
if __name__ == "__main__":
# read original image
UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 318 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
lowerCAmelCase_ = old_name
if "patch_embed" in old_name:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = old_name.split("." )
if layer == "0":
lowerCAmelCase_ = old_name.replace("0" , "convolution1" )
elif layer == "1":
lowerCAmelCase_ = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
lowerCAmelCase_ = old_name.replace("3" , "convolution2" )
else:
lowerCAmelCase_ = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , __lowerCAmelCase ):
lowerCAmelCase_ = r"\b\d{2}\b"
if bool(re.search(__lowerCAmelCase , __lowerCAmelCase ) ):
lowerCAmelCase_ = re.search(r"\d\.\d\d." , __lowerCAmelCase ).group()
else:
lowerCAmelCase_ = re.search(r"\d\.\d." , __lowerCAmelCase ).group()
if int(match[0] ) < 6:
lowerCAmelCase_ = old_name.replace(__lowerCAmelCase , "" )
lowerCAmelCase_ = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
lowerCAmelCase_ = "intermediate_stages." + trimmed_name
else:
lowerCAmelCase_ = old_name.replace(__lowerCAmelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
lowerCAmelCase_ = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
lowerCAmelCase_ = str(int(match[2] ) - num_meta4D_last_stage )
lowerCAmelCase_ = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
lowerCAmelCase_ = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
lowerCAmelCase_ = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
lowerCAmelCase_ = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
lowerCAmelCase_ = trimmed_name.replace("fc2" , "linear_out" )
lowerCAmelCase_ = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , __lowerCAmelCase ):
lowerCAmelCase_ = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
lowerCAmelCase_ = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
lowerCAmelCase_ = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
lowerCAmelCase_ = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
lowerCAmelCase_ = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
lowerCAmelCase_ = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
lowerCAmelCase_ = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
lowerCAmelCase_ = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
lowerCAmelCase_ = new_name.replace("norm" , "layernorm" )
lowerCAmelCase_ = "efficientformer." + new_name
else:
lowerCAmelCase_ = "efficientformer.encoder." + new_name
return new_name
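# Hand-traced example of the renaming above (illustrative, not an official
# mapping table): "patch_embed.0.weight" has layer "0", so it first becomes
# "patch_embed.convolution1.weight", and because "patch_embed" appears in the
# new name it is finally prefixed to
# "efficientformer.patch_embed.convolution1.weight".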
def lowerCamelCase__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
for key in checkpoint.copy().keys():
lowerCAmelCase_ = checkpoint.pop(__lowerCAmelCase )
lowerCAmelCase_ = val
return checkpoint
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
def lowerCamelCase__ ( __lowerCAmelCase : Path , __lowerCAmelCase : Path , __lowerCAmelCase : Path , __lowerCAmelCase : bool ):
"""simple docstring"""
lowerCAmelCase_ = torch.load(__lowerCAmelCase , map_location="cpu" )["model"]
lowerCAmelCase_ = EfficientFormerConfig.from_json_file(__lowerCAmelCase )
lowerCAmelCase_ = EfficientFormerForImageClassificationWithTeacher(__lowerCAmelCase )
lowerCAmelCase_ = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
lowerCAmelCase_ = config.depths[-1] - config.num_metaad_blocks + 1
lowerCAmelCase_ = convert_torch_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
lowerCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = 256
lowerCAmelCase_ = 224
lowerCAmelCase_ = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
lowerCAmelCase_ = processor(images=__lowerCAmelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
lowerCAmelCase_ = Compose(
[
Resize(__lowerCAmelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
Normalize(__lowerCAmelCase , __lowerCAmelCase ),
] )
lowerCAmelCase_ = image_transforms(__lowerCAmelCase ).unsqueeze(0 )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase_ = model(__lowerCAmelCase )
lowerCAmelCase_ = outputs.logits
lowerCAmelCase_ = (1, 1000)
if "l1" in model_name:
lowerCAmelCase_ = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
lowerCAmelCase_ = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
lowerCAmelCase_ = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7""" )
# Save Checkpoints
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(__lowerCAmelCase )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
_A = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 231 |
import requests
_A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase__ ( __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 231 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : List[Any] = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
A : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365 |
def __lowerCamelCase ( __a :float , __a :list[float] ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
A__ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__a ) )
return round(__a , ndigits=2 )
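# Worked example (illustrative numbers, not from the original file): with a
# 10% discount rate and cash flows [-1000, 500, 600],
#   NPV = -1000 / 1.1**0 + 500 / 1.1**1 + 600 / 1.1**2
#       = -1000 + 454.55 + 495.87
#       = -49.59 after rounding to 2 decimal places.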
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = ShapEPipeline
UpperCamelCase__ = ['''prompt''']
UpperCamelCase__ = ['''prompt''']
UpperCamelCase__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase__ = False
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return 8
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ):
torch.manual_seed(0 )
lowercase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
torch.manual_seed(0 )
lowercase_ : int = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
lowercase_ : Tuple = PriorTransformer(**lowercase_ )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
torch.manual_seed(0 )
lowercase_ : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
lowercase_ : Dict = ShapERenderer(**lowercase_ )
return model
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[Any] = self.dummy_prior
lowercase_ : str = self.dummy_text_encoder
lowercase_ : int = self.dummy_tokenizer
lowercase_ : Union[str, Any] = self.dummy_renderer
lowercase_ : Dict = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowercase_ , clip_sample=lowercase_ , clip_sample_range=1.0 , )
lowercase_ : Any = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tuple , lowercase_ : Tuple=0 ):
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ : Any = torch.manual_seed(lowercase_ )
else:
lowercase_ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ : Optional[Any] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[Any] = """cpu"""
lowercase_ : Dict = self.get_dummy_components()
lowercase_ : Union[str, Any] = self.pipeline_class(**lowercase_ )
lowercase_ : str = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : int = pipe(**self.get_dummy_inputs(lowercase_ ) )
lowercase_ : Optional[int] = output.images[0]
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase_ : Optional[Any] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : int = torch_device == """cpu"""
lowercase_ : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase_ , relax_max_difference=lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**lowercase_ )
lowercase_ : Any = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : str = 1
lowercase_ : List[str] = 2
lowercase_ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
for key in inputs.keys():
if key in self.batch_params:
lowercase_ : List[str] = batch_size * [inputs[key]]
lowercase_ : str = pipe(**lowercase_ , num_images_per_prompt=lowercase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
lowercase_ : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
lowercase_ : List[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Any = torch.Generator(device=lowercase_ ).manual_seed(0 )
lowercase_ : str = pipe(
"""a shark""" , generator=lowercase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 239 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowercase : Optional[int] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowercase : List[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowercase : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Any=None , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : Any=None , lowercase_ : int="auto" , lowercase_ : Tuple=-1 , lowercase_ : str=0.9 , lowercase_ : Union[str, Any]=5 , lowercase_ : List[str]=500 , lowercase_ : Union[str, Any]="gpt2-large" , lowercase_ : List[Any]=-1 , lowercase_ : str=1024 , lowercase_ : List[str]=25 , lowercase_ : str=5 , lowercase_ : List[Any]=True , lowercase_ : Tuple=25 , ):
lowercase_ : List[str] = compute_mauve(
p_text=lowercase_ , q_text=lowercase_ , p_features=lowercase_ , q_features=lowercase_ , p_tokens=lowercase_ , q_tokens=lowercase_ , num_buckets=lowercase_ , pca_max_data=lowercase_ , kmeans_explained_var=lowercase_ , kmeans_num_redo=lowercase_ , kmeans_max_iter=lowercase_ , featurize_model_name=lowercase_ , device_id=lowercase_ , max_text_length=lowercase_ , divergence_curve_discretization_size=lowercase_ , mauve_scaling_factor=lowercase_ , verbose=lowercase_ , seed=lowercase_ , )
return out
| 239 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( _lowerCAmelCase ):
lowercase__ = ['''image_processor''', '''tokenizer''']
lowercase__ = '''Pix2StructImageProcessor'''
lowercase__ = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
A__ = False
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self : Tuple , snake_case_ : Tuple=None , snake_case_ : Any = None , snake_case_ : Optional[Any] = True , snake_case_ : Any = False , snake_case_ : Dict = None , snake_case_ : Union[str, Any] = None , snake_case_ : Tuple = 2_048 , snake_case_ : Optional[int] = 0 , snake_case_ : Optional[Any] = None , snake_case_ : str = None , snake_case_ : Optional[int] = False , snake_case_ : str = False , snake_case_ : List[Any] = False , snake_case_ : Optional[int] = False , snake_case_ : str = False , snake_case_ : List[str] = True , snake_case_ : Optional[Any] = None , **snake_case_ : List[str] , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
A__ = self.tokenizer
A__ = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
A__ = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , max_patches=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
else:
# add pixel_values and bbox
A__ = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , max_patches=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None and not self.image_processor.is_vqa:
A__ = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if "attention_mask" in text_encoding:
A__ = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
A__ = text_encoding.pop("input_ids" )
else:
A__ = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE_ )
return encoding_image_processor
def __magic_name__ ( self : int , *snake_case_ : Optional[int] , **snake_case_ : int ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( self : str , *snake_case_ : Any , **snake_case_ : Any ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def __magic_name__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
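# Hypothetical usage sketch (the checkpoint name is an assumption, not taken
# from this file): in the non-VQA case the processor above tokenizes the text
# and patchifies the image separately, then folds the text encoding into the
# image encoding.
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="A photo of a cat", return_tensors="pt")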
| 366 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards.
            current.next = prev
            # Make the previous node be the current node.
            prev = current
            # Make the current node the next node (to progress iteration).
            current = next_node
        # Assign prev to head so the old tail becomes the new head.
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests a mix of data types as node payloads.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node at a specific location in the linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 230 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = "dummy_data"
snake_case_ = "datasets"
snake_case_ = False
def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,):
__A = 0
__A = dataset_name
__A = cache_dir
__A = use_local_dummy_data
__A = config
# download_callbacks take a single url as input
__A = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__A = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__A = str(A )
# to be downloaded
__A = None
__A = None
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
if self._dummy_file is None:
__A = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self : Optional[Any] ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def UpperCamelCase_ ( self : List[Any] ):
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def UpperCamelCase_ ( self : Tuple ):
__A = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__A = cached_path(
A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A )
return os.path.join(A ,self.dummy_file_name )
@property
def UpperCamelCase_ ( self : str ):
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def UpperCamelCase_ ( self : Any ):
if self._bucket_url is None:
__A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self : Tuple ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__A = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__A = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A ,A ):
return self.create_dummy_data_dict(A ,A )
elif isinstance(A ,(list, tuple) ):
return self.create_dummy_data_list(A ,A )
else:
return self.create_dummy_data_single(A ,A )
def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ):
return self.download_and_extract(A )
def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ):
return self.download_and_extract(A )
def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ):
return path
def UpperCamelCase_ ( self : str ):
return {}
def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ):
__A = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A ,A ):
for single_url in single_urls:
download_callback(A )
else:
__A = single_urls
download_callback(A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A ,A ):
__A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls]
else:
__A = single_urls
__A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) )
__A = value
# make sure that values are unique
if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__A = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ):
__A = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url )
__A = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__A = [data_url[0]] * len(A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(A )
return dummy_data_list
def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self : int ):
pass
def UpperCamelCase_ ( self : Dict ):
pass
def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ):
def _iter_archive_members(A : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__A = Path(self.dummy_file ).parent
__A = path.relative_to(A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__A = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A )
__A = Path(A )
__A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(A ).as_posix(), file_path.open("rb" )
def UpperCamelCase_ ( self : List[Any] ,A : Any ):
if not isinstance(A ,A ):
__A = [paths]
for path in paths:
if os.path.isfile(A ):
if os.path.basename(A ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A ):
if os.path.basename(A ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(A ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(A ,A )
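For reference, the quote_plus calls above turn the last URL path component into a filesystem-safe file name; a quick self-contained illustration (the URL is ours, purely for demonstration):

from pathlib import Path
from urllib.parse import quote_plus

url = "https://example.com/files/data.txt?rev=2"
# Path(...).name keeps everything after the last "/", query string included.
print(quote_plus(Path(url).name))  # data.txt%3Frev%3D2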
| 15 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCamelCase__ ( TaskTemplate ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = field(default="""summarization""", metadata={"""include_in_asdict_even_if_is_default""": True} )
_SCREAMING_SNAKE_CASE = Features({"""text""": Value("""string""" )} )
_SCREAMING_SNAKE_CASE = Features({"""summary""": Value("""string""" )} )
_SCREAMING_SNAKE_CASE = """text"""
_SCREAMING_SNAKE_CASE = """summary"""
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return {self.text_column: "text", self.summary_column: "summary"}
| 363 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy selection of a maximum set of non-overlapping activities,
    assuming the activities are already sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity has a start time greater than or equal to the
        # finish time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
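The greedy rule above is only correct when activities are already ordered by finish time; for unsorted input, sort first. A minimal pre-sorting sketch (the helper name select_activities is ours):

def select_activities(start: list[int], finish: list[int]) -> list[int]:
    # Sort activity indices by finish time, then apply the same greedy rule.
    order = sorted(range(len(finish)), key=lambda k: finish[k])
    selected = [order[0]]
    for j in order[1:]:
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]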
| 289 | 0 |
"""simple docstring"""
import math
def res(x: int, y: int) -> float:
    """
    Compare magnitudes of x^y via logarithms instead of computing the powers.
    """
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 91 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 332 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_lowerCamelCase : Optional[Any] = logging.getLogger()
def _a ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument("-f" )
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
return args.f
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
def A_ ( self : str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
def A_ ( self : Union[str, Any], _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, "run_glue_deebert.py" )
with patch.object(_UpperCAmelCase, "argv", _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_UpperCAmelCase, 0.666 )
@slow
@require_torch_non_multi_gpu
def A_ ( self : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(_UpperCAmelCase )
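The patch.object(sys, "argv", ...) trick above is a general way to drive a script's argparse-based entry point from a test; a minimal self-contained sketch (the main function here is ours, not the script's):

import sys
from unittest.mock import patch


def main() -> int:
    # Stand-in entry point that, like run_glue_deebert.main(), reads sys.argv.
    return len(sys.argv)


with patch.object(sys, "argv", ["prog", "--flag"]):
    assert main() == 2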
| 356 |
def hamming_distance(string1: str, string2: str) -> int:
    """
    Count the positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
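For integer inputs, the same idea works over bits instead of characters; a small sketch (the helper name hamming_distance_int is ours):

def hamming_distance_int(a: int, b: int) -> int:
    # XOR leaves a 1 bit wherever the operands differ; count those bits.
    return bin(a ^ b).count("1")


assert hamming_distance_int(0b1011, 0b1001) == 1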
| 191 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCAmelCase_ ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = 1
@register_to_config
def __init__( self , _a = 1_0_0_0 , _a = None ) -> Optional[Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(lowerCamelCase__ )
# standard deviation of the initial noise distribution
_a : int = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_a : Optional[int] = 4
# running values
_a : str = []
def __lowercase ( self , _a , _a = None ) -> Tuple:
_a : Optional[int] = num_inference_steps
_a : Tuple = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_a : Any = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_a : List[Any] = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
_a : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
_a : str = (1.0 - self.betas**2) ** 0.5
_a : Any = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
_a : Tuple = timesteps.to(lowerCamelCase__ )
_a : Optional[int] = []
def __lowercase ( self , _a , _a , _a , _a = True , ) -> Optional[int]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
_a : int = (self.timesteps == timestep).nonzero().item()
_a : List[Any] = timestep_index + 1
_a : str = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCamelCase__ )
if len(self.ets ) == 1:
_a : str = self.ets[-1]
elif len(self.ets ) == 2:
_a : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_a : Union[str, Any] = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
_a : Optional[Any] = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
_a : Optional[int] = self._get_prev_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase__ )
def __lowercase ( self , _a , *_a , **_a ) -> Dict:
return sample
def __lowercase ( self , _a , _a , _a , _a ) -> int:
_a : Any = self.alphas[timestep_index]
_a : Union[str, Any] = self.betas[timestep_index]
_a : Tuple = self.alphas[prev_timestep_index]
_a : Dict = self.betas[prev_timestep_index]
_a : Dict = (sample - sigma * ets) / max(lowerCamelCase__ , 1e-8 )
_a : Any = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
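For reference, the multistep branch in the step method above applies the classic Adams-Bashforth predictors to the stored model outputs e_{t-1}, ..., e_{t-4}: the two-step form (3*e_{t-1} - e_{t-2}) / 2, the three-step form (23*e_{t-1} - 16*e_{t-2} + 5*e_{t-3}) / 12, and the fourth-order combination

    e ≈ (1/24) * (55*e_{t-1} - 59*e_{t-2} + 37*e_{t-3} - 9*e_{t-4})

which is why the scheduler keeps a rolling list of only the last four predictions.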
| 235 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('-f' )
UpperCAmelCase__ = parser.parse_args()
return args.f
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
UpperCAmelCase__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
| 98 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : int , *__lowerCamelCase : str , **__lowerCamelCase : Any ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : str , **__lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : str , *__lowerCamelCase : Dict , **__lowerCamelCase : Dict ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : int , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Dict ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : str , **__lowerCamelCase : str ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : int , *__lowerCamelCase : List[str] , **__lowerCamelCase : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Any , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[int] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : List[str] , **__lowerCamelCase : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : str , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : int , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Any , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : int ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Dict ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[int] , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase_ ( *_A , **_A ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Dict ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Any , **__lowerCamelCase : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : List[Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : str , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Tuple , **__lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : Dict ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[Any] , *__lowerCamelCase : Any , **__lowerCamelCase : int ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : str ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : List[Any] , **__lowerCamelCase : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Dict , **__lowerCamelCase : Any ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Any , *__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : int , **__lowerCamelCase : str ) -> int:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : List[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[int] , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Any , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Any ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : str , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Any , *__lowerCamelCase : str , **__lowerCamelCase : List[str] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : List[str] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : List[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[int] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Dict , **__lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : str , *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : str , **__lowerCamelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : int , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[int] , *__lowerCamelCase : str , **__lowerCamelCase : str ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : str , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Any:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Union[str, Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Any ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : str , **__lowerCamelCase : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[Any] , *__lowerCamelCase : str , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Any , **__lowerCamelCase : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : Any , **__lowerCamelCase : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Union[str, Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Any , **__lowerCamelCase : str ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : Any , **__lowerCamelCase : List[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : Any , **__lowerCamelCase : Dict ) -> Any:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[Any] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : str , **__lowerCamelCase : Dict ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : List[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Tuple , *__lowerCamelCase : Dict , **__lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Tuple , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : List[str] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : List[str] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Any ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["torch"]
def __init__( self : Dict , *__lowerCamelCase : List[str] , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def lowercase_ ( cls : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def lowercase_ ( cls : str , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
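Every class above follows the same shape: a placeholder whose only job is to fail loudly when the torch backend is missing. A minimal self-contained sketch of that pattern (class and message names here are ours, not the library's):

class _RequiresTorch(type):
    # Intercepting __call__ on the metaclass means instantiation itself fails.
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the 'torch' backend to be installed.")


class SomePipeline(metaclass=_RequiresTorch):
    pass


try:
    SomePipeline()
except ImportError as err:
    print(err)  # SomePipeline requires the 'torch' backend to be installed.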
| 218 |
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] ) -> None:
SCREAMING_SNAKE_CASE__ = [2, 1, 2, -1]
SCREAMING_SNAKE_CASE__ = [1, 2, 3, 4]
def lowercase_ ( self : Optional[int] ) -> list[float]:
SCREAMING_SNAKE_CASE__ = len(self.first_signal )
SCREAMING_SNAKE_CASE__ = len(self.second_signal )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , __lowerCamelCase )
# create a zero matrix of max_length x max_length
SCREAMING_SNAKE_CASE__ = [[0] * max_length for i in range(__lowerCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = deque(self.second_signal )
rotated_signal.rotate(__lowerCamelCase )
for j, item in enumerate(__lowerCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
SCREAMING_SNAKE_CASE__ = np.matmul(np.transpose(__lowerCamelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__lowerCamelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
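As a cross-check, circular convolution is pointwise multiplication in the frequency domain, so the same result can be computed with the FFT; a minimal sketch using the two signals from the class above:

import numpy as np

first_signal = [2, 1, 2, -1]
second_signal = [1, 2, 3, 4]
n = max(len(first_signal), len(second_signal))

# np.fft.fft(x, n) zero-pads x to length n; the imaginary residue after ifft
# is numerical noise, so keep only the real part.
result = np.real(np.fft.ifft(np.fft.fft(first_signal, n) * np.fft.fft(second_signal, n)))
print([round(float(x), 2) for x in result])  # [10.0, 10.0, 6.0, 14.0]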
| 218 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = "▁"
UpperCAmelCase = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
UpperCAmelCase = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
UpperCAmelCase = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
UpperCAmelCase = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
UpperCAmelCase = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class A_ ( UpperCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = ["""input_ids"""]
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self , snake_case , snake_case=None , snake_case=False , snake_case="utf8" , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case = None , **snake_case , ):
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , vocab_file=__lowercase , encoding=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowercase = do_lower_case
lowercase = sentencepiece_model_ckpt
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase = self.load_vocab(filepath=__lowercase )
else:
            lowercase = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
lowercase = {v: k for k, v in self.vocab.items()}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if text is None:
return None
lowercase = self.tokenize(__lowercase )
lowercase , lowercase = '', []
for i, ch in enumerate(__lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase = self.SP_CHAR_MAPPING.get(__lowercase )
else:
lowercase = unicodedata.normalize('NFKC' , __lowercase )
if self.is_whitespace(__lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__lowercase ) )
lowercase , lowercase , lowercase = normalized_text, [], 0
if self.do_lower_case:
lowercase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase = token[1:]
lowercase = text[offset:].index(__lowercase ) + offset
lowercase = start + len(__lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase = end
return token_mapping
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.vocab )
def SCREAMING_SNAKE_CASE__ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , snake_case ):
lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        return "".join(self.SP_CHAR_MAPPING.get(c , c ) for c in text )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=False , snake_case=64 , snake_case=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowercase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowercase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowercase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowercase = self.sp_model.EncodeAsPieces(__lowercase )
else:
lowercase = self.sp_model.SampleEncodeAsPieces(__lowercase , __lowercase , __lowercase )
lowercase = []
for pi, piece in enumerate(__lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__lowercase ) and pi != 0:
new_pieces.append(__lowercase )
continue
else:
continue
lowercase = 0
for i, chunk in enumerate(__lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__lowercase ) or self.is_punct(__lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__lowercase )
lowercase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase = i
if len(__lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = ''.join(__lowercase ).replace(__lowercase , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.convert_ids_to_tokens(__lowercase )
lowercase = ''.join(__lowercase ).replace(__lowercase , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return self.vocab.get(__lowercase , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return self.reverse_vocab.get(__lowercase , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__lowercase ) + 1) + [1] * (len(__lowercase ) + 3)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__lowercase ) == 1:
lowercase = unicodedata.category(__lowercase )
if cat == "Zs":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = {}
with io.open(__lowercase , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(__lowercase ):
lowercase = line.rstrip('\n' )
lowercase = int(__lowercase )
return token_to_idx
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = 0
if os.path.isdir(__lowercase ):
lowercase = os.path.join(
__lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowercase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(__lowercase , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
lowercase = token_index
writer.write(token + '\n' )
index += 1
lowercase = os.path.join(__lowercase , 'sentencepiece.bpe.model' )
with open(__lowercase , 'wb' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (vocab_file,)
| 195 |
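The `load_vocab` helper near the end of the tokenizer follows a common convention: one token per line, with the 0-based line index as the token id, and a reverse map built from it for id-to-token lookups. A standalone sketch of that mapping (the sample tokens below are made up for illustration):

import io


def load_vocab(fp) -> dict:
    # One token per line; the line index is the token id.
    token_to_idx = {}
    for index, line in enumerate(fp):
        token = line.rstrip("\n")
        token_to_idx[token] = index
    return token_to_idx


sample = io.StringIO("[PAD]\n[CLS]\n[SEP]\n[UNK]\nhello\nworld\n")
vocab = load_vocab(sample)
reverse_vocab = {v: k for k, v in vocab.items()}
print(vocab["[CLS]"], reverse_vocab[4])  # 1 hello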
def naive_cut_rod_recursive(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    '''simple docstring'''
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 187 | 0 |
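The top-down variant above threads an explicit memo list through the recursion; the same idea can be written more compactly with functools.lru_cache. A quick sketch reproducing the example from main() (same price list, expected revenue 36):

from functools import lru_cache

prices = [6, 10, 12, 15, 20, 23]


@lru_cache(maxsize=None)
def cut_rod(n: int) -> int:
    # Best revenue for a rod of length n: try every length i for the first cut.
    if n == 0:
        return 0
    return max(prices[i - 1] + cut_rod(n - i) for i in range(1, n + 1))


print(cut_rod(6))  # 36, matching expected_max_revenue in main()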
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 198 |
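The two prints verify Fermat's little theorem: for a prime p and b not divisible by p, b**(p - 2) is the modular inverse of b, so dividing by b modulo p is the same as multiplying by that power. Since Python 3.8, pow also computes modular inverses directly, giving a third way to check the same identity:

p = 701
b = 10

inv_fermat = pow(b, p - 2, p)   # Fermat: b^(p-2) mod p
inv_builtin = pow(b, -1, p)     # built-in modular inverse (Python 3.8+)

assert inv_fermat == inv_builtin
assert (b * inv_fermat) % p == 1
print(inv_fermat)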
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCAmelCase_ : Dict = False
@skip_mps
class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
lowerCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __A ( cls ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def __A ( cls ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def __A ( self ):
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
A__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
A__ = CLIPTextModel(UpperCAmelCase__ )
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=0 ):
if str(UpperCAmelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCAmelCase__ )
else:
A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A__ = A__ = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def __A ( self ):
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
A__ = pipe(**UpperCAmelCase__ ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
A__ = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1e-3 )
def __A ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __A ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __A ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __A ( self ):
super().test_save_load_local(expected_max_difference=5e-4 )
def __A ( self ):
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
@classmethod
def __A ( cls ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def __A ( cls ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ = torch.manual_seed(51 )
A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to("cuda" )
A__ = "a painting of an elephant with glasses"
A__ = [5, 7]
A__ = pipe(
prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 198 | 1 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 98 |
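A quick round trip shows the decoder in action: encrypt a sentence with a known shift, then let the chi-squared scoring recover it. The encryptor below is a small helper written for this demo; the decoder is the function restored above, and the recovered shift is expected (though not guaranteed for arbitrary short inputs) to equal the one used for encryption:

def caesar_encrypt(plaintext: str, shift: int) -> str:
    # Shift each lowercase letter forward; leave other characters untouched.
    out = []
    for ch in plaintext:
        if "a" <= ch <= "z":
            out.append(chr((ord(ch) - ord("a") + shift) % 26 + ord("a")))
        else:
            out.append(ch)
    return "".join(out)


ciphertext = caesar_encrypt("defend the east wall of the castle", 7)
shift, chi_squared, decoded = decrypt_caesar_with_chi_squared(ciphertext)
print(shift, decoded)  # expected: 7 defend the east wall of the castle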
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318 | 0 |
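The assertions above pin down Pegasus's id layout: id 0 is <pad>, id 1 is </s>, ids 2-3 are the mask tokens, and every SentencePiece piece id is shifted up by tokenizer.offset (103 for the large checkpoint), so the SentencePiece unknown id 2 lands at 105. A toy sketch of that shift (the sample piece ids are hypothetical, chosen to reproduce ids seen in the test above):

OFFSET = 103  # matches tokenizer.offset asserted above


def sp_id_to_pegasus_id(sp_id: int) -> int:
    # Simplified: reserved ids occupy 0..OFFSET-1, then pieces are shifted.
    return sp_id + OFFSET


toy_sp_ids = [310, 512, 11]  # hypothetical SentencePiece ids
print([sp_id_to_pegasus_id(i) for i in toy_sp_ids])  # [413, 615, 114]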
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Regular Pentagon: {area_reg_polygon(5, 10) = }""") | 190 |
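A quick numeric check of Heron's formula as implemented in area_triangle_three_sides: for the 3-4-5 right triangle the semi-perimeter is 6 and the area is sqrt(6 * 3 * 2 * 1) = 6, which agrees with base * height / 2:

assert area_triangle_three_sides(3, 4, 5) == area_triangle(3, 4) == 6.0
print(area_triangle_three_sides(3, 4, 5))  # 6.0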
'''simple docstring'''
def jaro_winkler(str_1: str, str_2: str) -> float:
    def get_matched_characters(_str_1: str, _str_2: str) -> str:
        matched = []
        limit = min(len(_str_1), len(_str_2)) // 2
        for i, char in enumerate(_str_1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_2)))
            if char in _str_2[left:right]:
                matched.append(char)
                _str_2 = f'{_str_2[0:_str_2.index(char)]} {_str_2[_str_2.index(char) + 1:]}'
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c_1, c_2) for c_1, c_2 in zip(matching_1, matching_2) if c_1 != c_2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c_1, c_2 in zip(str_1[:4], str_2[:4]):
        if c_1 == c_2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world''')) | 190 | 1 |
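A classic worked example for the function above: for "martha" vs "marhta" all six characters match and t/h form one transposition, so jaro = (1 + 1 + 5/6) / 3 ≈ 0.9444; with a common prefix of length 3 and scaling factor 0.1, jaro_winkler ≈ 0.9444 + 0.3 * (1 - 0.9444) ≈ 0.9611:

print(round(jaro_winkler("martha", "marhta"), 4))  # 0.9611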
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> str:
__a = old_name
if "patch_embed" in old_name:
__a , __a , __a = old_name.split('''.''' )
if layer == "0":
__a = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__a = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__a = old_name.replace('''3''' , '''convolution2''' )
else:
__a = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''' , lowerCAmelCase__ ):
__a = r'''\b\d{2}\b'''
if bool(re.search(lowerCAmelCase__ , lowerCAmelCase__ ) ):
__a = re.search(r'''\d\.\d\d.''' , lowerCAmelCase__ ).group()
else:
__a = re.search(r'''\d\.\d.''' , lowerCAmelCase__ ).group()
if int(match[0] ) < 6:
__a = old_name.replace(lowerCAmelCase__ , '''''' )
__a = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__a = '''intermediate_stages.''' + trimmed_name
else:
__a = old_name.replace(lowerCAmelCase__ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__a = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__a = str(int(match[2] ) - num_meta4D_last_stage )
__a = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__a = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__a = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__a = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__a = trimmed_name.replace('''fc2''' , '''linear_out''' )
__a = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''' , lowerCAmelCase__ ):
__a = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__a = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__a = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__a = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__a = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__a = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__a = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__a = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__a = new_name.replace('''norm''' , '''layernorm''' )
__a = '''efficientformer.''' + new_name
else:
__a = '''efficientformer.encoder.''' + new_name
return new_name
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ) -> Tuple:
for key in checkpoint.copy().keys():
__a = checkpoint.pop(lowerCAmelCase__ )
__a = val
return checkpoint
def lowercase ( ) -> Any:
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return image
def lowercase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : Path , lowerCAmelCase__ : Path , lowerCAmelCase__ : bool ) -> Any:
__a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''model''']
__a = EfficientFormerConfig.from_json_file(lowerCAmelCase__ )
__a = EfficientFormerForImageClassificationWithTeacher(lowerCAmelCase__ )
__a = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__a = config.depths[-1] - config.num_metaad_blocks + 1
__a = convert_torch_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
__a = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__a = prepare_img()
__a = 256
__a = 224
__a = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__a = processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__a = Compose(
[
Resize(lowerCAmelCase__ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(lowerCAmelCase__ ),
ToTensor(),
Normalize(lowerCAmelCase__ , lowerCAmelCase__ ),
] )
__a = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
__a = model(lowerCAmelCase__ )
__a = outputs.logits
__a = (1, 1000)
if "l1" in model_name:
__a = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , lowerCAmelCase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__a = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , lowerCAmelCase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__a = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(lowerCAmelCase__ )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=lowerCAmelCase__ , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=lowerCAmelCase__ , )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowercase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 45 |
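Most of the conversion script above is mechanical key renaming driven by substring and regex checks. A stripped-down sketch of the idea on a toy state dict (the rules below are an illustrative subset written for this demo, not the full table the script implements):

import re


def rename_key(old_name: str) -> str:
    # Illustrative subset of the renaming rules used above.
    new_name = old_name
    if "patch_embed.0" in new_name:
        new_name = new_name.replace("patch_embed.0", "patch_embed.convolution1")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if re.search(r"\bnetwork\b", new_name):
        new_name = new_name.replace("network", "intermediate_stages")
    return new_name


toy_state_dict = {"patch_embed.0.weight": 0, "network.0.proj.bias": 1}
print({rename_key(k): v for k, v in toy_state_dict.items()})
# {'patch_embed.convolution1.weight': 0, 'intermediate_stages.0.projection.bias': 1}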
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
__A = {
"google/rembert": 256,
}
__A = "▁"
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = RemBertTokenizer
def __init__( self : Tuple , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int="[CLS]" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Dict="[SEP]" , UpperCamelCase__ : int="<pad>" , UpperCamelCase__ : Any="[CLS]" , UpperCamelCase__ : str="[MASK]" , **UpperCamelCase__ : Optional[Any] , )-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = do_lower_case
__lowerCAmelCase: int = remove_space
__lowerCAmelCase: int = keep_accents
__lowerCAmelCase: str = vocab_file
__lowerCAmelCase: Tuple = False if not self.vocab_file else True
def lowercase_ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = [self.sep_token_id]
__lowerCAmelCase: Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False)-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__)) + [1] + ([0] * len(UpperCamelCase__)) + [1]
return [1] + ([0] * len(UpperCamelCase__)) + [1]
def lowercase_ ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = [self.sep_token_id]
__lowerCAmelCase: Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__):
logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase__))
return
__lowerCAmelCase: Optional[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__):
copyfile(self.vocab_file , UpperCamelCase__)
return (out_vocab_file,)
| 217 | 0 |
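The three helpers above encode the standard BERT-style layouts [CLS] A [SEP] and [CLS] A [SEP] B [SEP], with token type ids marking the two segments. A plain-python sketch of the same layout (cls=101 and sep=102 are placeholder ids chosen for illustration, not RemBERT's real ids):

CLS_ID, SEP_ID = 101, 102  # placeholder ids


def build_inputs_with_special_tokens(ids_a, ids_b=None):
    # [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair.
    if ids_b is None:
        return [CLS_ID] + ids_a + [SEP_ID]
    return [CLS_ID] + ids_a + [SEP_ID] + ids_b + [SEP_ID]


def create_token_type_ids(ids_a, ids_b=None):
    # Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP].
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)


print(build_inputs_with_special_tokens([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]
print(create_token_type_ids([7, 8], [9]))             # [0, 0, 0, 0, 1, 1]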
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
@property
def __A ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __A ( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def __A ( self ):
A__ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
A__ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
A__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
A__ : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__ )
A__ : Any = """A red cat sitting on a park bench"""
A__ : List[str] = np.random.RandomState(0 )
A__ : Tuple = pipe(
prompt=A__ , image=A__ , mask_image=A__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=A__ , output_type="""np""" , )
A__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 141 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : int = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Any = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
A_ : Union[str, Any] = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
A_ : Union[str, Any] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
A_ : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
A_ : Dict = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
A_ : List[str] = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
A_ : Optional[Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
A_ : Optional[Any] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
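# Usage sketch (illustrative comment, not part of the original file): the auto classes
# above resolve a concrete Flax model class from a checkpoint's config via the mappings,
# e.g. FlaxAutoModel.from_pretrained("bert-base-cased") instantiates FlaxBertModel.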
| 141 | 1 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
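        # Worked example with the defaults above: image_size=30, patch_size=2 gives
        # num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 gives
        # seq_length = ceil(0.4 * (225 + 1)) = 91 visible positions per image.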
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the random mask reproducible across frameworks
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_before = outputs[0].cpu().numpy()
            out_before[np.isnan(out_before)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_after = after_outputs[0].cpu().numpy()
                out_after[np.isnan(out_after)] = 0
                max_diff = np.amax(np.abs(out_before - out_after))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
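        # Sanity check: ViTMAEConfig defaults to image_size=224 and patch_size=16,
        # so num_patches = (224 // 16) ** 2 = 196, matching the (1, 196, 768) logits below.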
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 148 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
pass
| 221 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCAmelCase_ : int = TypeVar("""T""")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self):
        return f'{self.data}'
class Stack(Generic[T]):
    def __init__(self):
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self):
        return "->".join([str(item) for item in self])
    def __len__(self):
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = pop_node.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
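# Usage sketch (illustrative):
#   stack = Stack[int]()
#   stack.push(1); stack.push(2)
#   assert str(stack) == "2->1" and stack.peek() == 2
#   assert stack.pop() == 2 and len(stack) == 1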
if __name__ == "__main__":
from doctest import testmod
testmod()
| 91 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 315 | 0 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowerCamelCase ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
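# Illustrative behaviour, inferred from the assertions above: for a remote URI the
# scheme prefix is stripped, e.g. extract_path_from_uri("s3://mock-s3-bucket") ->
# "mock-s3-bucket", while a plain local path comes back unchanged.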
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
__lowerCAmelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
__lowerCAmelCase = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
__lowerCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = os.path.basename(_UpperCamelCase )
__lowerCAmelCase = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(_UpperCamelCase , "r" , encoding="utf-8" ) as f, open(_UpperCamelCase , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
assert (
str(warning_info[0].message )
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
| 364 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
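# Worked example: nested keys are merged rather than overwritten,
# merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) -> {"a": {"c": 2, "b": 1}}.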
def is_port_in_use(port=None):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 259 | 0 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
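# Usage sketch (the random result depends on the seed; shown output is one possibility):
#   random.seed(1)
#   random_graph(4, 0.5)   # e.g. {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}
#   complete_graph(3)      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}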
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = None, scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", dropout: float = 0.05, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, attention_type: str = "prob", sampling_factor: int = 5, distil: bool = True, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
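    # Worked example, assuming the default values named above: with no static, dynamic
    # or time features and input_size=1, _number_of_features = 1 * 2 = 2 (the loc and
    # scale features), so feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.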
| 224 | 0 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("""Length must be a positive integer.""")
    return [n * (2 * n - 1) for n in range(length)]
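# Worked example: hexagonal_numbers(5) -> [0, 1, 6, 15, 28], since n * (2n - 1)
# evaluates to 0, 1, 6, 15 and 28 for n = 0..4.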
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 66 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = """french fries"""
lowerCamelCase = sd_pipe(**A , negative_prompt=A )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A , view_batch_size=2 )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=A )
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , A=0 ) -> Dict:
'''simple docstring'''
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCamelCase = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = 0
def callback_fn(A , A , A ) -> None:
lowerCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCamelCase = latents[0, -3:, -3:, -1]
lowerCamelCase = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCamelCase = latents[0, -3:, -3:, -1]
lowerCamelCase = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase = False
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
pipe(**A , callback=A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __A ( self ) -> str:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A )
lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 66 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
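# Worked example for luhn_validation('4111111111111111'): doubling every second digit
# from the right turns the leading 4 into 8 and seven of the 1s into 2s, giving
# 8 + 7*2 + 8*1 = 30, and 30 % 10 == 0, so the number passes the check.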
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False
    print(f'{credit_card_number} is a valid credit card number.')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 110 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
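# Worked example: with bert_tokens ["北", "京", "大", "学"] and chinese_word_set {"北京"},
# the inner characters of the matched word get the "##" prefix, yielding
# ["北", "##京", "大", "学"], so whole-word masking can treat "北京" as one unit.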
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(a__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
# save chinese tokens' pos
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
        help='''file to process, same as the training data in LM''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
a__ : int = parser.parse_args()
main(args)
| 313 | 0 |
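The script above ultimately records, per sentence, the positions of WordPiece continuation pieces that fall inside LTP-segmented Chinese words. A toy sketch of that bookkeeping, with hypothetical names and no LTP dependency:

def subword_positions(bert_tokens):
    # Positions of '##'-prefixed pieces: the candidates for whole-word masking.
    return [i for i, tok in enumerate(bert_tokens) if tok.startswith('##')]

print(subword_positions(['[CLS]', '中', '##国', '人', '[SEP]']))  # [2]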
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
snake_case_ : Optional[int] = 'sshleifer/bart-tiny-random'
snake_case_ : Tuple = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return AutoConfig.from_pretrained(lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase , *_UpperCamelCase : int = create_student_by_copying_alternating_layers(lowerCamelCase__ ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.num_hidden_layers ,1 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase , *_UpperCamelCase : int = create_student_by_copying_alternating_layers(lowerCamelCase__ ,tempfile.mkdtemp() ,e=1 ,d=lowerCamelCase__ )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase , *_UpperCamelCase : int = create_student_by_copying_alternating_layers(lowerCamelCase__ ,tempfile.mkdtemp() ,e=1 ,d=lowerCamelCase__ )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,self.teacher_config.encoder_layers )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase , *_UpperCamelCase : Any = create_student_by_copying_alternating_layers(lowerCamelCase__ ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,1 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
create_student_by_copying_alternating_layers(lowerCamelCase__ ,tempfile.mkdtemp() ,e=lowerCamelCase__ ,d=lowerCamelCase__ )
| 236 |
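create_student_by_copying_alternating_layers, exercised by the tests above, initializes a small student from a subset of teacher layers. A sketch of the layer-selection idea only; the helper below is hypothetical and not the library's implementation:

def pick_layers_to_copy(n_student, n_teacher):
    # Choose n_student evenly spaced layer indices from the teacher.
    if n_student >= n_teacher:
        return list(range(n_teacher))
    step = n_teacher / n_student
    return [int(i * step) for i in range(n_student)]

print(pick_layers_to_copy(3, 12))  # [0, 4, 8]
print(pick_layers_to_copy(1, 6))   # [0]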
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
snake_case_ : Dict = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_UpperCamelCase : Optional[int] = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
_UpperCamelCase : Optional[Any] = config_class.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[str] = True
print(f'Building TensorFlow model from configuration: {config}' )
_UpperCamelCase : Any = model_class(UpperCAmelCase_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
_UpperCamelCase : Union[str, Any] = cached_file(
UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_UpperCamelCase : List[Any] = load_pytorch_checkpoint_in_tfa_model(UpperCAmelCase_ , UpperCAmelCase_ )
if compare_with_pt_model:
_UpperCamelCase : Optional[int] = tf_model(tf_model.dummy_inputs , training=UpperCAmelCase_ ) # build the network
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : List[str] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCAmelCase_ , config=UpperCAmelCase_ , state_dict=UpperCAmelCase_ )
with torch.no_grad():
_UpperCamelCase : Optional[Any] = pt_model(**pt_model.dummy_inputs )
_UpperCamelCase : int = pto[0].numpy()
_UpperCamelCase : Any = tfo[0].numpy()
_UpperCamelCase : Dict = np.amax(np.abs(np_pt - np_tf ) )
print(f'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'
# Save pytorch-model
print(f'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(UpperCAmelCase_ , save_format='h5' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=False , ):
if args_model_type is None:
_UpperCamelCase : List[Any] = list(MODEL_CLASSES.keys() )
else:
_UpperCamelCase : Tuple = [args_model_type]
for j, model_type in enumerate(UpperCAmelCase_ , start=1 ):
print('=' * 1_0_0 )
print(f' Converting model type {j}/{len(UpperCAmelCase_ )}: {model_type}' )
print('=' * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_UpperCamelCase : List[Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_UpperCamelCase : int = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCAmelCase_ , UpperCAmelCase_ ) , start=1 ):
print('-' * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
_UpperCamelCase : Dict = model_shortcut_name
elif only_convert_finetuned_models:
print(f' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
f' Converting checkpoint {i}/{len(UpperCAmelCase_ )}: {model_shortcut_name} - model_type {model_type}' )
print('-' * 1_0_0 )
if config_shortcut_name in aws_config_map:
_UpperCamelCase : Any = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
else:
_UpperCamelCase : str = config_shortcut_name
if model_shortcut_name in aws_model_maps:
_UpperCamelCase : int = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
else:
_UpperCamelCase : List[str] = model_shortcut_name
if os.path.isfile(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=UpperCAmelCase_ , pytorch_checkpoint_path=UpperCAmelCase_ , config_file=UpperCAmelCase_ , tf_dump_path=os.path.join(UpperCAmelCase_ , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=UpperCAmelCase_ , )
if remove_cached_files:
os.remove(UpperCAmelCase_ )
os.remove(UpperCAmelCase_ )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
snake_case_ : Optional[int] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 236 | 1 |
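The --compare_with_pt_model path of the script above reduces to comparing the first output tensor of each framework. A minimal NumPy sketch of that check, using the same 2e-2 tolerance:

import numpy as np

def check_outputs_close(np_pt, np_tf, tol=2e-2):
    # Same criterion as the script: max absolute difference under tolerance.
    diff = float(np.amax(np.abs(np_pt - np_tf)))
    assert diff <= tol, f'model absolute difference is >{tol}: {diff}'
    return diff

print(check_outputs_close(np.array([1.0, 2.0]), np.array([1.001, 2.0])))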
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Tuple = FunnelTokenizer
lowerCAmelCase__ : Optional[int] = FunnelTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Tuple = True
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
__lowercase = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Optional[int] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a__ ( self : Dict , **_UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Any ) -> str:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
__lowercase = tokenizer('UNwant\u00E9d,running' )
__lowercase = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
__lowercase = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 325 |
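The last test above encodes Funnel's token-type convention: the <cls> token gets type 2 while sentences A and B get 0 and 1. A sketch of building those ids by hand; this is an assumption read off the assertions, not the tokenizer's API:

from typing import List, Optional

def funnel_token_type_ids(len_a: int, len_b: Optional[int] = None) -> List[int]:
    ids = [2] + [0] * len_a           # <cls> is type 2, sentence A is type 0
    if len_b is not None:
        ids += [1] * len_b            # sentence B is type 1
    return ids

print(funnel_token_type_ids(3))     # [2, 0, 0, 0]
print(funnel_token_type_ids(3, 2))  # [2, 0, 0, 0, 1, 1]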
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 | 1 |
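The guarded-import pattern above (raise OptionalDependencyNotAvailable, swallow it, and register the symbol only when the backend exists) can be condensed with the standard library. A generic sketch:

import importlib
import importlib.util

def optional_import(name):
    # Return the module if it is installed, otherwise None so callers can degrade.
    if importlib.util.find_spec(name) is None:
        return None
    return importlib.import_module(name)

sentencepiece = optional_import('sentencepiece')
print('sentencepiece available:', sentencepiece is not None)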
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_lowerCAmelCase : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase : str = k.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return k
def __snake_case ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
_UpperCAmelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusConfig(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = torch_model.model.state_dict()
_UpperCAmelCase : Union[str, Any] = {}
for k, v in tf_weights.items():
_UpperCAmelCase : Union[str, Any] = rename_state_dict_key(SCREAMING_SNAKE_CASE__ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase : Any = v.T
_UpperCAmelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase : Tuple = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
_UpperCAmelCase : Any = mapping["shared.weight"]
_UpperCAmelCase : Dict = mapping["shared.weight"]
_UpperCAmelCase : Dict = {k: torch.zeros_like(SCREAMING_SNAKE_CASE__ ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = torch_model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Optional[Any] = ["Adafactor", "global_step"]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase : int = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = array
return tf_weights
def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(SCREAMING_SNAKE_CASE__ ).parent.name
_UpperCAmelCase : Tuple = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
_UpperCAmelCase : Dict = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=SCREAMING_SNAKE_CASE__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(SCREAMING_SNAKE_CASE__ )
# convert model
_UpperCAmelCase : Union[str, Any] = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase : Optional[int] = task_specific_params
_UpperCAmelCase : str = convert_pegasus(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(SCREAMING_SNAKE_CASE__ , Path(SCREAMING_SNAKE_CASE__ ) / "pytorch_model.bin" )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
if args.save_dir is None:
_lowerCAmelCase : Tuple = Path(args.tf_ckpt_path).parent.name
_lowerCAmelCase : Dict = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 368 |
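rename_state_dict_key above applies an ordered list of substring substitutions to map TF variable names onto PyTorch keys. A standalone sketch with a shortened pattern list:

PATTERNS = [
    ('attention', 'attn'),
    ('kernel', 'weight'),
]

def rename_key(key, patterns=PATTERNS):
    # Apply each (old, new) substitution in order, as the converter does.
    for old, new in patterns:
        key = key.replace(old, new)
    return key

print(rename_key('encoder/layer_0/attention/kernel'))  # encoder/layer_0/attn/weight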
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE__ : list ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE__ ) < 2:
return collection
def circle_sort_util(SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> bool:
_UpperCAmelCase : Dict = False
if low == high:
return swapped
_UpperCAmelCase : str = low
_UpperCAmelCase : Dict = high
while left < right:
if collection[left] > collection[right]:
_UpperCAmelCase , _UpperCAmelCase : List[str] = (
collection[right],
collection[left],
)
_UpperCAmelCase : Optional[int] = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_UpperCAmelCase , _UpperCAmelCase : int = (
collection[right + 1],
collection[left],
)
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Dict = low + int((high - low) / 2 )
_UpperCAmelCase : Any = circle_sort_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = circle_sort_util(SCREAMING_SNAKE_CASE__ , mid + 1 , SCREAMING_SNAKE_CASE__ )
return swapped or left_swap or right_swap
_UpperCAmelCase : Tuple = True
while is_not_sorted is True:
_UpperCAmelCase : Dict = circle_sort_util(SCREAMING_SNAKE_CASE__ , 0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
return collection
if __name__ == "__main__":
_lowerCAmelCase : Dict = input("Enter numbers separated by a comma:\n").strip()
_lowerCAmelCase : Optional[Any] = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 202 | 0 |
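A de-obfuscated sketch of the same circle sort: compare mirrored pairs, recurse on both halves, and repeat whole passes until one completes without a swap.

def circle_sort(seq):
    def one_pass(lo, hi):
        if lo == hi:
            return False
        swapped = False
        left, right = lo, hi
        while left < right:
            if seq[left] > seq[right]:
                seq[left], seq[right] = seq[right], seq[left]
                swapped = True
            left += 1
            right -= 1
        # Odd-length range: compare the middle element with its right neighbour.
        if left == right and seq[left] > seq[right + 1]:
            seq[left], seq[right + 1] = seq[right + 1], seq[left]
            swapped = True
        mid = lo + (hi - lo) // 2
        # Non-short-circuiting | so both halves are always processed.
        return one_pass(lo, mid) | one_pass(mid + 1, hi) | swapped

    if len(seq) >= 2:
        while one_pass(0, len(seq) - 1):
            pass
    return seq

print(circle_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]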
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__a = logging.get_logger(__name__)
__a = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = '''layoutlmv3'''
def __init__( self : Optional[int] , lowerCAmelCase__ : List[str]=5_0_2_6_5 , lowerCAmelCase__ : Optional[Any]=7_6_8 , lowerCAmelCase__ : Optional[int]=1_2 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : Any=3_0_7_2 , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Union[str, Any]=5_1_2 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Optional[int]=1e-5 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : List[Any]=0 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[str]=1_0_2_4 , lowerCAmelCase__ : Tuple=1_2_8 , lowerCAmelCase__ : Tuple=1_2_8 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : int=1_2_8 , lowerCAmelCase__ : str=6_4 , lowerCAmelCase__ : List[Any]=2_5_6 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Tuple=2_2_4 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : List[str]=1_6 , lowerCAmelCase__ : int=None , **lowerCAmelCase__ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=lowerCAmelCase__ , hidden_size=lowerCAmelCase__ , num_hidden_layers=lowerCAmelCase__ , num_attention_heads=lowerCAmelCase__ , intermediate_size=lowerCAmelCase__ , hidden_act=lowerCAmelCase__ , hidden_dropout_prob=lowerCAmelCase__ , attention_probs_dropout_prob=lowerCAmelCase__ , max_position_embeddings=lowerCAmelCase__ , type_vocab_size=lowerCAmelCase__ , initializer_range=lowerCAmelCase__ , layer_norm_eps=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = max_ad_position_embeddings
_UpperCAmelCase : int = coordinate_size
_UpperCAmelCase : Optional[int] = shape_size
_UpperCAmelCase : str = has_relative_attention_bias
_UpperCAmelCase : Dict = rel_pos_bins
_UpperCAmelCase : str = max_rel_pos
_UpperCAmelCase : Dict = has_spatial_attention_bias
_UpperCAmelCase : Dict = rel_ad_pos_bins
_UpperCAmelCase : Union[str, Any] = max_rel_ad_pos
_UpperCAmelCase : Optional[int] = text_embed
_UpperCAmelCase : Any = visual_embed
_UpperCAmelCase : List[str] = input_size
_UpperCAmelCase : List[Any] = num_channels
_UpperCAmelCase : Union[str, Any] = patch_size
_UpperCAmelCase : Tuple = classifier_dropout
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = version.parse('''1.12''' )
@property
def _lowerCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
def _lowerCAmelCase ( self : str ) -> float:
"""simple docstring"""
return 1e-5
@property
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return 1_2
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : "ProcessorMixin" , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional["TensorType"] = None , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 4_0 , lowerCAmelCase__ : int = 4_0 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , "apply_ocr" , lowerCAmelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase : Dict = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase : Dict = processor.tokenizer.num_special_tokens_to_add(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase : List[str] = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase : Any = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase : Optional[Any] = self._generate_dummy_images(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = dict(
processor(
lowerCAmelCase__ , text=lowerCAmelCase__ , boxes=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , ) )
        return inputs
| 145 |
'''simple docstring'''
import math
def __UpperCAmelCase ( a_: int ):
    return math.sqrt(a_ ) * math.sqrt(a_ ) == a_
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : List[str] = n
while left <= right:
_UpperCAmelCase : Dict = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_UpperCAmelCase : int = mid - 1
else:
_UpperCAmelCase : Tuple = mid + 1
return False
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 145 | 1 |
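An integer-only version of the perfect-square check above; math.isqrt sidesteps the float rounding risk that math.sqrt carries for large inputs:

import math

def is_perfect_square(n):
    return n >= 0 and math.isqrt(n) ** 2 == n

print(is_perfect_square(16), is_perfect_square(15))  # True False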
"""simple docstring"""
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = name
lowerCAmelCase = val
def __str__( self ):
"""simple docstring"""
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self , _snake_case ):
"""simple docstring"""
return self.val < other.val
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = self.build_heap(_snake_case )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.get_value(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return (idx - 1) // 2
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return idx * 2 + 1
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return idx * 2 + 2
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.heap_dict[key]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = len(_snake_case ) - 1
lowerCAmelCase = self.get_parent_idx(_snake_case )
for idx, i in enumerate(_snake_case ):
lowerCAmelCase = idx
lowerCAmelCase = i.val
for i in range(_snake_case , -1 , -1 ):
self.sift_down(_snake_case , _snake_case )
return array
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
while True:
lowerCAmelCase = self.get_left_child_idx(_snake_case ) # noqa: E741
lowerCAmelCase = self.get_right_child_idx(_snake_case )
lowerCAmelCase = idx
if l < len(_snake_case ) and array[l] < array[idx]:
lowerCAmelCase = l
if r < len(_snake_case ) and array[r] < array[smallest]:
lowerCAmelCase = r
if smallest != idx:
lowerCAmelCase = array[smallest], array[idx]
(
lowerCAmelCase
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowerCAmelCase = smallest
else:
break
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.get_parent_idx(_snake_case )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowerCAmelCase = self.heap[idx], self.heap[p]
lowerCAmelCase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowerCAmelCase = p
lowerCAmelCase = self.get_parent_idx(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.heap[0]
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.heap[-1], self.heap[0]
lowerCAmelCase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowerCAmelCase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.heap.append(_snake_case )
lowerCAmelCase = len(self.heap ) - 1
lowerCAmelCase = node.val
self.sift_up(len(self.heap ) - 1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.heap ) == 0
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowerCAmelCase = new_value
lowerCAmelCase = new_value
self.sift_up(self.idx_of_element[node] )
__UpperCamelCase : Dict = Node('''R''', -1)
__UpperCamelCase : Tuple = Node('''B''', 6)
__UpperCamelCase : str = Node('''A''', 3)
__UpperCamelCase : Dict = Node('''X''', 1)
__UpperCamelCase : Optional[Any] = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__UpperCamelCase : List[str] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
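The class above implements decrease-key by tracking each node's heap index and sifting up. The standard library's heapq has no decrease-key; the usual workaround is lazy deletion, sketched here:

import heapq

def decrease_key(heap, finder, name, new_val):
    # Push a fresh entry; stale ones are skipped on pop.
    finder[name] = new_val
    heapq.heappush(heap, (new_val, name))

def pop_min(heap, finder):
    while heap:
        val, name = heapq.heappop(heap)
        if finder.get(name) == val:  # skip stale entries
            del finder[name]
            return name, val
    raise KeyError('pop from empty heap')

heap, finder = [], {}
for name, val in [('R', -1), ('B', 6), ('A', 3), ('X', 1), ('E', 4)]:
    decrease_key(heap, finder, name, val)
decrease_key(heap, finder, 'B', -17)
print(pop_min(heap, finder))  # ('B', -17)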
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 )
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ):
lowerCAmelCase = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ):
lowerCAmelCase = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase = int(parent_a[1] * 100 ) + 1
lowerCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0]
lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
lowerCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
lowerCAmelCase ,lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowerCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 309 | 0 |
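The evolution loop above condenses into a toy version with the same fitness (count of characters matching the target), elitist selection of the best third, and single-gene mutation. Names are hypothetical and the run is seeded for reproducibility:

import random

def evolve(target, genes, pop_size=100, seed=0):
    rng = random.Random(seed)

    def fitness(s):
        return sum(a == b for a, b in zip(s, target))

    pop = [''.join(rng.choice(genes) for _ in target) for _ in range(pop_size)]
    generation = 0
    while True:
        generation += 1
        pop.sort(key=fitness, reverse=True)
        if pop[0] == target:
            return generation
        elite = pop[: pop_size // 3]  # keep the best third
        children = []
        while len(elite) + len(children) < pop_size:
            child = list(rng.choice(elite))
            child[rng.randrange(len(target))] = rng.choice(genes)  # mutate one gene
            children.append(''.join(child))
        pop = elite + children

print(evolve('GA', 'ACGT'))  # converges within a few generations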
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ ( __lowerCAmelCase ) -> bytes:
if len(__lowerCAmelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
__lowercase : int = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ ( __lowerCAmelCase ) -> bytes:
if i < 0:
raise ValueError('''Input must be non-negative''' )
__lowercase : List[Any] = format(__lowerCAmelCase , '''08x''' )[-8:]
__lowercase : str = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def UpperCAmelCase_ ( __lowerCAmelCase ) -> bytes:
__lowercase : int = b''''''
for char in message:
bit_string += format(__lowerCAmelCase , '''08b''' ).encode('''utf-8''' )
__lowercase : Optional[int] = format(len(__lowerCAmelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__lowerCAmelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Generator[list[int], None, None]:
if len(__lowerCAmelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(__lowerCAmelCase ) , 512 ):
__lowercase : Union[str, Any] = bit_string[pos : pos + 512]
__lowercase : Dict = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def UpperCAmelCase_ ( __lowerCAmelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
__lowercase : List[str] = format(__lowerCAmelCase , '''032b''' )
__lowercase : str = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__lowerCAmelCase , 2 )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return (a + b) % 2**32
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCAmelCase_ ( __lowerCAmelCase ) -> bytes:
__lowercase : Optional[int] = preprocess(__lowerCAmelCase )
__lowercase : Any = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__lowercase : int = 0X67452301
__lowercase : Dict = 0XEFCDAB89
__lowercase : List[Any] = 0X98BADCFE
__lowercase : int = 0X10325476
__lowercase : Tuple = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__lowerCAmelCase ):
__lowercase : List[str] = aa
__lowercase : str = ba
__lowercase : int = ca
__lowercase : Tuple = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__lowercase : Union[str, Any] = d ^ (b & (c ^ d))
__lowercase : List[Any] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__lowercase : Union[str, Any] = c ^ (d & (b ^ c))
__lowercase : List[Any] = (5 * i + 1) % 16
elif i <= 47:
__lowercase : Optional[Any] = b ^ c ^ d
__lowercase : List[Any] = (3 * i + 5) % 16
else:
__lowercase : Dict = c ^ (b | not_aa(__lowerCAmelCase ))
__lowercase : Optional[Any] = (7 * i) % 16
__lowercase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
__lowercase : Any = d
__lowercase : List[str] = c
__lowercase : Optional[Any] = b
__lowercase : Optional[int] = sum_aa(__lowerCAmelCase , left_rotate_aa(__lowerCAmelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__lowercase : Union[str, Any] = sum_aa(__lowerCAmelCase , __lowerCAmelCase )
__lowercase : Optional[int] = sum_aa(__lowerCAmelCase , __lowerCAmelCase )
__lowercase : Dict = sum_aa(__lowerCAmelCase , __lowerCAmelCase )
__lowercase : Union[str, Any] = sum_aa(__lowerCAmelCase , __lowerCAmelCase )
__lowercase : List[str] = reformat_hex(__lowerCAmelCase ) + reformat_hex(__lowerCAmelCase ) + reformat_hex(__lowerCAmelCase ) + reformat_hex(__lowerCAmelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 |
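A hand-rolled digest like the one above is easiest to validate against hashlib. A few reference vectors:

import hashlib

# Reference digests for sanity-checking a custom MD5 implementation.
for message in (b'', b'abc'):
    print(message, hashlib.md5(message).hexdigest())
# b''    -> d41d8cd98f00b204e9800998ecf8427e
# b'abc' -> 900150983cd24fb0d6963f7d28e17f72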
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> int:
if attention_mask is None:
__lowercase : int = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
"""simple docstring"""
A__ : Union[str, Any] = OPTConfig
A__ : Optional[int] = {}
A__ : Optional[int] = '''gelu'''
def __init__( self : Tuple , _snake_case : Dict , _snake_case : List[str]=13 , _snake_case : Optional[Any]=7 , _snake_case : List[str]=True , _snake_case : Union[str, Any]=False , _snake_case : Union[str, Any]=99 , _snake_case : Dict=16 , _snake_case : Any=2 , _snake_case : Dict=4 , _snake_case : List[Any]=4 , _snake_case : Optional[int]="gelu" , _snake_case : List[str]=0.1 , _snake_case : List[str]=0.1 , _snake_case : List[Any]=20 , _snake_case : Any=2 , _snake_case : List[str]=1 , _snake_case : Tuple=0 , _snake_case : Dict=16 , _snake_case : Tuple=16 , ):
__lowercase : Dict = parent
__lowercase : str = batch_size
__lowercase : List[str] = seq_length
__lowercase : Optional[int] = is_training
__lowercase : Optional[int] = use_labels
__lowercase : Optional[int] = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : Any = eos_token_id
__lowercase : List[Any] = pad_token_id
__lowercase : Optional[int] = bos_token_id
__lowercase : List[str] = embed_dim
__lowercase : Any = word_embed_proj_dim
__lowercase : Optional[int] = False
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_snake_case , **self.config_updates , )
__lowercase : Optional[int] = prepare_opt_inputs_dict(_snake_case , _snake_case )
return config, inputs_dict
def snake_case_ ( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ):
__lowercase : int = TFOPTModel(config=_snake_case )
__lowercase : Union[str, Any] = inputs_dict['''input_ids''']
__lowercase : Tuple = input_ids[:1, :]
__lowercase : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
__lowercase : Dict = 1
# first forward pass
__lowercase : Dict = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case )
__lowercase , __lowercase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowercase : str = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase : Any = model(_snake_case , attention_mask=_snake_case )[0]
__lowercase : List[Any] = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
__lowercase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A__ : int = (TFOPTForCausalLM,) if is_tf_available() else ()
A__ : List[str] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
A__ : Union[str, Any] = False
A__ : Optional[int] = False
A__ : int = False
A__ : List[str] = 1_0
def snake_case_ ( self : Any ):
__lowercase : Optional[Any] = TFOPTModelTester(self )
__lowercase : List[Any] = ConfigTester(self , config_class=_snake_case )
def snake_case_ ( self : Tuple ):
self.config_tester.run_common_tests()
def snake_case_ ( self : int ):
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
def snake_case_ ( self : List[Any] ):
__lowercase , __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_snake_case : Optional[int] , _snake_case : Dict ):
if hasattr(_snake_case , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_snake_case , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowercase : Union[str, Any] = model_class(config=_snake_case )
__lowercase : int = _get_word_embedding_weight(_snake_case , model.get_input_embeddings() )
__lowercase : str = _get_word_embedding_weight(_snake_case , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_snake_case )
__lowercase : Dict = _get_word_embedding_weight(_snake_case , model.get_input_embeddings() )
__lowercase : Tuple = _get_word_embedding_weight(_snake_case , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowercase : List[str] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _snake_case )
# check that weights remain the same after resizing
__lowercase : str = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowercase : str = False
self.assertTrue(_snake_case )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _snake_case )
__lowercase : Optional[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowercase : Optional[Any] = False
self.assertTrue(_snake_case )
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Dict:
return tf.constant(__lowerCAmelCase , dtype=tf.intaa )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[Any] = 9_9
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowercase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowercase : List[str] = input_ids.shape[0]
__lowercase : int = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 156 | 1 |
'''simple docstring'''
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a 0/1 mask before converting to an image
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
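# A minimal usage sketch of the tool above (illustrative only; "cat.png" and the
# surrounding agents runtime are assumptions, not part of this file):
#
#   from PIL import Image
#
#   segmenter = ImageSegmentationTool()
#   image = Image.open("cat.png")
#   mask = segmenter(image, "cat")
#   mask.save("cat_mask.png")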
| 361 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
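# An alternative closed-form sketch (not part of the original solution): by
# Binet's formula F(k) ~ phi**k / sqrt(5), F(k) has about
# k*log10(phi) - log10(5)/2 + 1 digits, so the first index with n digits is:
#
#   import math
#
#   def solution_closed_form(n: int = 1000) -> int:
#       phi = (1 + math.sqrt(5)) / 2
#       return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))
#
#   solution_closed_form(3)  # -> 12, and indeed fibonacci(12) == 144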
| 111 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape=()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
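# A minimal end-to-end sketch of the functional scheduler API above (hedged
# illustration; the shapes and the identity "model" are stand-ins, not part of
# this file):
#
#   import jax
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 32, 32)) * state.init_noise_sigma
#   for t in state.timesteps:
#       model_output = sample  # placeholder for a real UNet noise prediction
#       sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)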
| 179 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
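# A minimal usage sketch (illustrative; the DialoGPT checkpoint is an
# assumption, any conversational checkpoint works):
#
#   from transformers import pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("What is the best movie of all time?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])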
| 179 | 1 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
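# For example (a sketch; the environment variable names are illustrative only):
#
#   os.environ["MY_DEBUG_FLAG"] = "yes"
#   parse_flag_from_env("MY_DEBUG_FLAG")                      # -> True, since strtobool("yes") == 1
#   get_int_from_env(["LOCAL_WORLD_SIZE", "WORLD_SIZE"], 1)   # -> 1 if neither variable is set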
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
| 34 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
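# A quick worked example of the rounding above (illustrative): with the default
# scale_factor=8, get_new_h_w(768, 768) == (96, 96), since 768 // 8**2 == 12
# exactly and 12 * 8 == 96; get_new_h_w(700, 700) == (88, 88), since
# 700 // 64 == 10 with remainder 60, which rounds up to 11 patches of 8 pixels.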
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder, tokenizer, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 34 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
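# A minimal usage sketch (hedged; "resnet18" is an assumed timm model name and
# torch would need to be imported in the calling code):
#
#   import torch
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, features_only=True)
#   backbone = TimmBackbone(config)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   print([fm.shape for fm in outputs.feature_maps])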
| 177 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
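# A minimal usage sketch (assumption: `training_loop` is a user-defined
# function that builds an `Accelerator` internally, as the errors above require):
#
#   def training_loop(mixed_precision="fp16"):
#       ...  # build Accelerator, model, dataloaders, and train
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)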
| 259 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
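# A minimal usage sketch (the checkpoint name mirrors the vocab map above;
# weights are downloaded on first use):
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tokenizer("Hello world", return_tensors="pt").input_ids
#   print(tokenizer.batch_decode(ids))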
| 242 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
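# A minimal usage sketch (model name and image path are illustrative):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score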
| 242 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
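# A hand-checked example of the first-order entropy above (illustrative):
# analyze_text("abca") yields single-character counts {"a": 2, "b": 1, "c": 1},
# so with p = (1/2, 1/4, 1/4) the Shannon entropy is
# H = -(0.5*log2(0.5) + 0.25*log2(0.25) + 0.25*log2(0.25)) = 1.5 bits.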
| 66 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
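# A few illustrative cases, derived by hand from the regex above:
#   is_sri_lankan_phone_number("0094702343221")  # True  (0094 prefix, operator code 70)
#   is_sri_lankan_phone_number("+94771234567")   # True  (+94 prefix, operator code 77)
#   is_sri_lankan_phone_number("0731234567")     # False (73 is not an accepted operator code)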
| 66 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def __lowerCamelCase ( self ):
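# Random pixel values (and labels, when requested) plus a fresh config for each test.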
__lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : List[str] = None
if self.use_labels:
__lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
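# Each of the len(depths) - 1 downsampling stages merges 2x2 patches, so the
# sequence length shrinks 4x and the embedding dimension doubles per stage.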
__lowerCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = ['stem']
__lowerCAmelCase : str = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = config_and_inputs
__lowerCAmelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
A_ : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
A_ : List[str] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
A_ : Optional[Any] = False
A_ : Optional[Any] = False
A_ : Union[str, Any] = False
A_ : Optional[int] = False
A_ : Optional[int] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = MaskFormerSwinModelTester(self )
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
return
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Swin does not support feedforward chunking' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Any = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[int] = outputs.hidden_states
__lowerCAmelCase : Dict = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
__lowerCAmelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : Tuple = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCAmelCase : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCAmelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : str = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
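# Compare tuple and dict model outputs element-wise; NaNs are zeroed first so
# torch.allclose below is not tripped by NaN != NaN.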
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has"
f" `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
__lowerCAmelCase : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class A__ ( unittest.TestCase , BackboneTesterMixin):
A_ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
A_ : Optional[Any] = MaskFormerSwinConfig
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = MaskFormerSwinModelTester(self )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCAmelCase : Any = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
__lowerCAmelCase : str = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCAmelCase : str = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCAmelCase : List[Any] = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions ) | 182 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
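# Every (batch_size, sequence_length) cell of the benchmark results must be populated.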
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__lowerCAmelCase : Optional[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 'sgugger/tiny-distilbert-classification'
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Any = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'patrickvonplaten/t5-tiny-random'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
__lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) ).exists() )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'sequential' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'cumulative' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'current' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) ).exists() ) | 182 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ZeroShotClassificationPipeline(
model=UpperCAmelCase , tokenizer=UpperCAmelCase , candidate_labels=['politics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# No kwarg
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
_UpperCAmelCase = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(1 )
] , )
_UpperCAmelCase = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(UpperCAmelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier(UpperCAmelCase , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=UpperCAmelCase )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=UpperCAmelCase , )
self.run_entailment_id(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = zero_shot_classifier.model.config
_UpperCAmelCase = config.labelaid
_UpperCAmelCase = zero_shot_classifier.entailment_id
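# The pipeline locates the label id that means "entailment"; with generic
# LABEL_* names it cannot, and falls back to -1, as the assertions below show.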
_UpperCAmelCase = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
_UpperCAmelCase = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
_UpperCAmelCase = original_labelaid
self.assertEqual(UpperCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 39 |
def _lowerCAmelCase ( lowerCAmelCase_ :int | float | str )->tuple[int, int]:
'''simple docstring'''
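# Scale the fractional part by a power of ten, then reduce the resulting
# numerator/denominator pair with Euclid's gcd loop below.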
try:
snake_case_ = float(lowerCAmelCase_ )
except ValueError:
raise ValueError("Please enter a valid number" )
snake_case_ = decimal - int(decimal )
if fractional_part == 0:
return int(decimal ), 1
else:
snake_case_ = len(str(lowerCAmelCase_ ).split("." )[1] )
snake_case_ = int(decimal * (10**number_of_frac_digits) )
snake_case_ = 10**number_of_frac_digits
snake_case_ , snake_case_ = denominator, numerator
while True:
snake_case_ = dividend % divisor
if remainder == 0:
break
snake_case_ , snake_case_ = divisor, remainder
snake_case_ , snake_case_ = numerator / divisor, denominator / divisor
return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 159 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCamelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
lowerCamelCase_ : Dict = IFInpaintingPipeline
lowerCamelCase_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCamelCase_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase_ ( self ) -> int:
return self._get_dummy_components()
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase=0 ) -> Any:
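# Deterministic dummy inputs: a seeded generator plus random 32x32 image and mask tensors.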
if str(lowerCamelCase ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(lowerCamelCase )
else:
snake_case_ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
snake_case_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase_ ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase_ ( self ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
self._test_save_load_local()
def lowerCAmelCase_ ( self ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , ) | 34 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
lowerCamelCase_ = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCamelCase( lowercase_ ) -> Tuple:
'''simple docstring'''
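# Fill an EfficientNetConfig with the per-variant hyperparameters above and
# the ImageNet-1k id/label maps fetched from the hub.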
snake_case_ = EfficientNetConfig()
snake_case_ = CONFIG_MAP[model_name]["""hidden_dim"""]
snake_case_ = CONFIG_MAP[model_name]["""width_coef"""]
snake_case_ = CONFIG_MAP[model_name]["""depth_coef"""]
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = CONFIG_MAP[model_name]["""dropout_rate"""]
snake_case_ = CONFIG_MAP[model_name]["""dw_padding"""]
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = 1000
snake_case_ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(k ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase( ) -> Tuple:
'''simple docstring'''
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
def UpperCamelCase( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowercase_ , )
return preprocessor
def UpperCamelCase( lowercase_ ) -> str:
'''simple docstring'''
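# Build the mapping from original TF parameter names (stem, per-block
# expansion/depthwise/squeeze-excite/projection, top) to HF state-dict keys.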
snake_case_ = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
snake_case_ = sorted(set(lowercase_ ) )
snake_case_ = len(lowercase_ )
snake_case_ = {b: str(lowercase_ ) for b, i in zip(lowercase_ , range(lowercase_ ) )}
snake_case_ = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
snake_case_ = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
snake_case_ = {}
for item in rename_keys:
if item[0] in original_param_names:
snake_case_ = """efficientnet.""" + item[1]
snake_case_ = """classifier.weight"""
snake_case_ = """classifier.bias"""
return key_mapping
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
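# TF stores conv kernels as (H, W, in, out) and dense kernels as (in, out);
# permute/transpose them into PyTorch's (out, in, H, W) / (out, in) layouts.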
for key, value in tf_params.items():
if "normalization" in key:
continue
snake_case_ = key_mapping[key]
if "_conv" in key and "kernel" in key:
snake_case_ = torch.from_numpy(lowercase_ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
snake_case_ = torch.from_numpy(lowercase_ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
snake_case_ = torch.from_numpy(np.transpose(lowercase_ ) )
else:
snake_case_ = torch.from_numpy(lowercase_ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase_ )
@torch.no_grad()
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
snake_case_ = model_classes[model_name](
include_top=lowercase_ , weights="""imagenet""" , input_tensor=lowercase_ , input_shape=lowercase_ , pooling=lowercase_ , classes=1000 , classifier_activation="""softmax""" , )
snake_case_ = original_model.trainable_variables
snake_case_ = original_model.non_trainable_variables
snake_case_ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
snake_case_ = param.numpy()
snake_case_ = list(tf_params.keys() )
# Load HuggingFace model
snake_case_ = get_efficientnet_config(lowercase_ )
snake_case_ = EfficientNetForImageClassification(lowercase_ ).eval()
snake_case_ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
snake_case_ = rename_keys(lowercase_ )
replace_params(lowercase_ , lowercase_ , lowercase_ )
# Initialize preprocessor and preprocess input image
snake_case_ = convert_image_processor(lowercase_ )
snake_case_ = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
snake_case_ = hf_model(**lowercase_ )
snake_case_ = outputs.logits.detach().numpy()
# Original model inference
snake_case_ = False
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
snake_case_ = image.img_to_array(lowercase_ )
snake_case_ = np.expand_dims(lowercase_ , axis=0 )
snake_case_ = original_model.predict(lowercase_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase_ , lowercase_ , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase_ ):
os.mkdir(lowercase_ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase_ )
preprocessor.save_pretrained(lowercase_ )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
snake_case_ = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase_ )
hf_model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCamelCase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) | 34 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 276 |
import random
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = num - 1
UpperCAmelCase_: Optional[int] = 0
while s % 2 == 0:
UpperCAmelCase_: Union[str, Any] = s // 2
t += 1
for _ in range(5 ):
UpperCAmelCase_: str = random.randrange(2 , num - 1 )
UpperCAmelCase_: Optional[Any] = pow(a , s , num )
if v != 1:
UpperCAmelCase_: Dict = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCAmelCase_: Optional[int] = i + 1
UpperCAmelCase_: int = (v**2) % num
return True
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
if num < 2:
return False
UpperCAmelCase_: Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: int = 1_0_2_4 ):
"""simple docstring"""
while True:
UpperCAmelCase_: List[Any] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowerCAmelCase__ ):
return num
if __name__ == "__main__":
a : Optional[int] = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 147 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class snake_case_ ( PretrainedConfig ):
A_ = 'codegen'
A_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : str , _snake_case : int=50400 , _snake_case : List[str]=2048 , _snake_case : Optional[int]=2048 , _snake_case : List[Any]=4096 , _snake_case : Any=28 , _snake_case : Tuple=16 , _snake_case : Any=64 , _snake_case : Any=None , _snake_case : List[str]="gelu_new" , _snake_case : str=0.0 , _snake_case : int=0.0 , _snake_case : Tuple=0.0 , _snake_case : Any=1E-5 , _snake_case : int=0.02 , _snake_case : str=True , _snake_case : int=50256 , _snake_case : List[str]=50256 , _snake_case : str=False , **_snake_case : Union[str, Any] , )->Any:
'''simple docstring'''
__lowerCAmelCase : List[str] = vocab_size
__lowerCAmelCase : Dict = n_ctx
__lowerCAmelCase : List[Any] = n_positions
__lowerCAmelCase : List[str] = n_embd
__lowerCAmelCase : Tuple = n_layer
__lowerCAmelCase : List[Any] = n_head
__lowerCAmelCase : Optional[Any] = n_inner
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Optional[int] = activation_function
__lowerCAmelCase : List[str] = resid_pdrop
__lowerCAmelCase : str = embd_pdrop
__lowerCAmelCase : Optional[int] = attn_pdrop
__lowerCAmelCase : Optional[int] = layer_norm_epsilon
__lowerCAmelCase : Union[str, Any] = initializer_range
__lowerCAmelCase : str = use_cache
__lowerCAmelCase : int = bos_token_id
__lowerCAmelCase : str = eos_token_id
super().__init__(
bos_token_id=_snake_case , eos_token_id=_snake_case , tie_word_embeddings=_snake_case , **_snake_case )
class snake_case_ ( __lowercase ):
def __init__( self : Optional[Any] , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , )->Union[str, Any]:
'''simple docstring'''
super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case )
if not getattr(self._config , """pad_token_id""" , _snake_case ):
# TODO: how to do that better?
__lowerCAmelCase : Any = 0
@property
def UpperCAmelCase__ ( self : Optional[int] )->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCAmelCase : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction="""inputs""" )
__lowerCAmelCase : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase : List[str] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCAmelCase__ ( self : Union[str, Any] )->int:
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase__ ( self : List[Any] )->int:
'''simple docstring'''
return self._config.n_head
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , )->Mapping[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = super(_snake_case , self ).generate_dummy_inputs(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase : List[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase : str = seqlen + 2
__lowerCAmelCase : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase : int = [
(torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers )
]
__lowerCAmelCase : List[str] = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase : int = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase : Dict = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
return ordered_inputs
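
    # Hedged illustration (added comment, not from the original file): with
    # use_past=True, batch=2 and seq_length=3, generate_dummy_inputs above
    # yields n_layer pairs of zero tensors shaped
    # (2, n_head, seqlen + 2 = 5, n_embd // n_head), and the attention mask is
    # concatenated with ones so its sequence dimension becomes 3 + 5 = 8.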
@property
def UpperCAmelCase__ ( self : Optional[Any] )->int:
'''simple docstring'''
        return 13
| 232 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class snake_case_ ( __lowercase ):
A_ = 'gptj'
A_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Tuple , _snake_case : Optional[int]=50400 , _snake_case : Union[str, Any]=2048 , _snake_case : Tuple=4096 , _snake_case : Optional[int]=28 , _snake_case : Tuple=16 , _snake_case : Optional[Any]=64 , _snake_case : Optional[int]=None , _snake_case : str="gelu_new" , _snake_case : str=0.0 , _snake_case : Optional[int]=0.0 , _snake_case : List[str]=0.0 , _snake_case : Tuple=1E-5 , _snake_case : List[str]=0.02 , _snake_case : Optional[int]=True , _snake_case : Optional[Any]=50256 , _snake_case : List[str]=50256 , _snake_case : str=False , **_snake_case : Tuple , )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Any = n_positions
__lowerCAmelCase : Optional[Any] = n_embd
__lowerCAmelCase : Optional[int] = n_layer
__lowerCAmelCase : Optional[int] = n_head
__lowerCAmelCase : List[Any] = n_inner
__lowerCAmelCase : List[str] = rotary_dim
__lowerCAmelCase : int = activation_function
__lowerCAmelCase : str = resid_pdrop
__lowerCAmelCase : Union[str, Any] = embd_pdrop
__lowerCAmelCase : Dict = attn_pdrop
__lowerCAmelCase : Optional[Any] = layer_norm_epsilon
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Dict = use_cache
__lowerCAmelCase : List[str] = bos_token_id
__lowerCAmelCase : List[Any] = eos_token_id
super().__init__(
bos_token_id=_snake_case , eos_token_id=_snake_case , tie_word_embeddings=_snake_case , **_snake_case )
class snake_case_ ( __lowercase ):
def __init__( self : Union[str, Any] , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , )->str:
'''simple docstring'''
super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case )
if not getattr(self._config , """pad_token_id""" , _snake_case ):
# TODO: how to do that better?
__lowerCAmelCase : Dict = 0
@property
def UpperCAmelCase__ ( self : Tuple )->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCAmelCase : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction="""inputs""" )
__lowerCAmelCase : Dict = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCAmelCase__ ( self : Dict )->int:
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase__ ( self : str )->int:
'''simple docstring'''
return self._config.n_head
def UpperCAmelCase__ ( self : str , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , )->Mapping[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = super(_snake_case , self ).generate_dummy_inputs(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase : List[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase : Optional[Any] = seqlen + 2
__lowerCAmelCase : Dict = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase : Tuple = [
(torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers )
]
__lowerCAmelCase : int = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase : List[str] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase__ ( self : str )->int:
'''simple docstring'''
        return 13
| 232 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline built from a VQ-VAE, a UNet
    and one of the supported schedulers."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
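
# Hedged usage sketch (added for illustration; the checkpoint id and file names
# are assumptions, not taken from this file):
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("low_res.png").convert("RGB")
#     upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")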
| 13 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Project Euler problem 6: difference between the square of the sum and
    the sum of the squares of the first ``n`` natural numbers."""
    sum_cubes = (n * (n + 1) // 2) ** 2  # also the square of the sum (Nicomachus)
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
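

# Worked check (illustrative, added comment): for n = 10 the square of the sum
# is 55 ** 2 = 3025 and the sum of the squares is 385, so solution(10) == 2640,
# the value quoted in the Project Euler problem statement.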
if __name__ == "__main__":
print(F'{solution() = }')
| 309 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
lowerCamelCase__ : Dict = DetaConfig(
backbone_config=_lowerCamelCase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , )
# set labels
lowerCamelCase__ : Union[str, Any] = 'huggingface/label-files'
if "o365" in model_name:
lowerCamelCase__ : List[Any] = 366
lowerCamelCase__ : int = 'object365-id2label.json'
else:
lowerCamelCase__ : Tuple = 91
lowerCamelCase__ : Dict = 'coco-detection-id2label.json'
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) ) , 'r' ) )
lowerCamelCase__ : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Optional[int] = idalabel
lowerCamelCase__ : Dict = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = dct.pop(_lowerCamelCase )
lowerCamelCase__ : Tuple = val
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase__ : List[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Optional[int] = in_proj_weight[:dim, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[: dim]
lowerCamelCase__ : Any = in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase__ : List[Any] = in_proj_bias[
dim : dim * 2
]
lowerCamelCase__ : Any = in_proj_weight[
-dim :, :
]
lowerCamelCase__ : Dict = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# transformer decoder self-attention layers
lowerCamelCase__ : Optional[Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
lowerCamelCase__ : List[str] = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCamelCase__ : Any = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Any = in_proj_weight[:hidden_size, :]
lowerCamelCase__ : Optional[Any] = in_proj_bias[:hidden_size]
lowerCamelCase__ : Union[str, Any] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
lowerCamelCase__ : str = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Union[str, Any] = in_proj_weight[-hidden_size:, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[-hidden_size:]
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = get_deta_config(_lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
lowerCamelCase__ : int = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
lowerCamelCase__ : Any = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
lowerCamelCase__ : Optional[Any] = torch.load(_lowerCamelCase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(_lowerCamelCase , param.shape )
# rename keys
lowerCamelCase__ : Tuple = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCamelCase__ : int = state_dict.pop(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = val
if "input_proj" in key:
lowerCamelCase__ : List[Any] = state_dict.pop(_lowerCamelCase )
lowerCamelCase__ : Any = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCamelCase__ : Dict = state_dict.pop(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ : Tuple = DetaForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
lowerCamelCase__ : int = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(_lowerCamelCase )
# load image processor
lowerCamelCase__ : str = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Any = processor(images=_lowerCamelCase , return_tensors='pt' )
lowerCamelCase__ : List[str] = encoding['pixel_values']
lowerCamelCase__ : Any = model(pixel_values.to(_lowerCamelCase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]] )
lowerCamelCase__ : List[str] = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]] )
elif model_name == "deta-swin-large-o365":
lowerCamelCase__ : str = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]] )
lowerCamelCase__ : List[str] = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowerCamelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowerCamelCase ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
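
# Hedged usage sketch (the script file name is an assumption; the flags are the
# ones defined by the parser above):
#
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub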
| 367 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
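
# Hedged usage sketch (the script file name and checkpoint location are
# assumptions; the flags are the ones defined by the parser above):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook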
| 316 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 26 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__UpperCAmelCase : Any = "src/diffusers"
# Matches is_xxx_available()
__UpperCAmelCase : List[str] = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
__UpperCAmelCase : Dict = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
__UpperCAmelCase : int = "\n{0} = None\n"
__UpperCAmelCase : List[str] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
__UpperCAmelCase : Tuple = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def A__ ( SCREAMING_SNAKE_CASE__) -> Any:
__snake_case: int = _re_backend.findall(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE__)
def A__ ( ) -> Optional[int]:
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """__init__.py""") , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
__snake_case: Optional[Any] = f.readlines()
# Get to the point we do the actual imports for type checking
__snake_case: Tuple = 0
__snake_case: Any = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE__):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__snake_case: List[Any] = find_backend(lines[line_index])
if backend is not None:
while not lines[line_index].startswith("""else:"""):
line_index += 1
line_index += 1
__snake_case: Any = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE__) and len(lines[line_index]) > 1:
__snake_case: List[Any] = lines[line_index]
__snake_case: str = _re_single_line_import.search(SCREAMING_SNAKE_CASE__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
if len(SCREAMING_SNAKE_CASE__) > 0:
__snake_case: Optional[Any] = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE__)
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
if backend_specific_objects is None:
__snake_case: Union[str, Any] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__snake_case: Union[str, Any] = {}
for backend, objects in backend_specific_objects.items():
__snake_case: List[Any] = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""")) + """]"""
__snake_case: Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for o in objects])
__snake_case: List[str] = dummy_file
return dummy_files
def A__ ( SCREAMING_SNAKE_CASE__=False) -> int:
__snake_case: List[str] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__snake_case: Dict = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
__snake_case: Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , """utils""")
__snake_case: List[Any] = {
backend: os.path.join(SCREAMING_SNAKE_CASE__ , F'''dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py''')
for backend in dummy_files.keys()
}
__snake_case: int = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE__):
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
__snake_case: Any = f.read()
else:
__snake_case: Tuple = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py as the main '''
"""__init__ has new objects.""")
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.write(dummy_files[backend])
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py. Run `make fix-copies` '''
"""to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : List[Any] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
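
# Hedged usage sketch: as the comment at the top of this file says, run it
# from the repository root.
#
#   python utils/check_dummies.py                      # report out-of-date dummy files
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate them in place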
| 111 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command-line options for the launch helper."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own args plus the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
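
# Hedged usage sketch (the launcher file name is an assumption; remaining
# arguments are forwarded verbatim to the training script):
#
#   python xla_spawn.py --num_cores 8 path/to/train_script.py <training-script-args...>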
| 367 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as an entity start/end
    against tokenized support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
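
# Hedged usage note (added comment): W_query and W_supports are expected to be
# tokenizer outputs (dicts with input_ids/attention_mask), with W_supports also
# carrying "sizes", "start_token_id" and "end_token_id" entries that are popped
# before the BERT call; p_starts/p_ends hold one softmax row per query, scoring
# each support token as an entity start or end.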
| 160 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A =logging.get_logger(__name__)
A ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A =[
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def snake_case_ (_a : int , _a : Optional[int] , _a : int , _a : List[Any] , _a : Tuple ):
for attribute in key.split('''.''' ):
UpperCAmelCase = getattr(_a , _a )
if weight_type is not None:
UpperCAmelCase = getattr(_a , _a ).shape
else:
UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
elif weight_type == "running_mean":
UpperCAmelCase = value
elif weight_type == "running_var":
UpperCAmelCase = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase = value
elif weight_type == "inv_freq":
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def snake_case_ (_a : List[Any] , _a : int , _a : Dict ):
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(_a )[0].split('''.''' )[-2]
UpperCAmelCase = mapped_key.replace('''*''' , _a )
if "pos_bias_u" in name:
UpperCAmelCase = None
elif "pos_bias_v" in name:
UpperCAmelCase = None
elif "weight_g" in name:
UpperCAmelCase = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase = '''weight_v'''
elif "bias" in name:
UpperCAmelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase = '''weight'''
elif "running_mean" in name:
UpperCAmelCase = '''running_mean'''
elif "inv_freq" in name:
UpperCAmelCase = '''inv_freq'''
elif "running_var" in name:
UpperCAmelCase = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase = '''num_batches_tracked'''
else:
UpperCAmelCase = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(F"Unused weights: {unused_weights}" )
def snake_case_ (_a : Any , _a : int , _a : str , _a : Any , _a : Dict ):
UpperCAmelCase = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase = name.split('''.''' )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_a )
@torch.no_grad()
def snake_case_ (_a : List[Any] , _a : int , _a : Any=None , _a : List[str]=None , _a : List[Any]=True ):
if config_path is not None:
UpperCAmelCase = WavaVecaConformerConfig.from_pretrained(_a , hidden_act='''swish''' )
else:
UpperCAmelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCAmelCase = '''rotary'''
if is_finetuned:
if dict_path:
UpperCAmelCase = Dictionary.load(_a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase = target_dict.pad_index
UpperCAmelCase = target_dict.bos_index
UpperCAmelCase = target_dict.eos_index
UpperCAmelCase = len(target_dict.symbols )
UpperCAmelCase = os.path.join(_a , '''vocab.json''' )
if not os.path.isdir(_a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_a ) )
return
os.makedirs(_a , exist_ok=_a )
UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase = 0
UpperCAmelCase = 1
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_a , _a )
UpperCAmelCase = WavaVecaCTCTokenizer(
_a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_a , )
UpperCAmelCase = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_a , return_attention_mask=_a , )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=_a , tokenizer=_a )
processor.save_pretrained(_a )
UpperCAmelCase = WavaVecaConformerForCTC(_a )
else:
UpperCAmelCase = WavaVecaConformerForPreTraining(_a )
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase = fairseq.tasks.setup_task(_a )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_a )
UpperCAmelCase = model[0].eval()
recursively_load_weights(_a , _a , not is_finetuned )
hf_wavavec.save_pretrained(_a )
if __name__ == "__main__":
A =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A =parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
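
# Hedged usage sketch (the script file name and checkpoint path are assumptions;
# the flags are the ones defined by the parser above):
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec2_conformer_large.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer --not_finetuned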
| 34 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class _a(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so the shortest edge of the image matches `size["shortest_edge"]`,
        # keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
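# --- Usage sketch (added for illustration; not part of the original file) ---
# Assumes a hypothetical segmentation checkpoint; `model` and `pil_image` are
# placeholders, and `_a` is the image processor class defined above.
#
#   processor = _a()
#   inputs = processor(images=pil_image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   # `target_sizes` expects (height, width); PIL's `.size` is (width, height).
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[pil_image.size[::-1]])[0]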
| 34 | 1 |
def check_bipartite_dfs(graph):
    """Return True if `graph` (an adjacency list) is bipartite, i.e. 2-colorable."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex `v` with `c`, then give every unvisited neighbour the other color.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # A graph is bipartite iff no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
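# Example of the failing case (illustrative): any odd cycle cannot be 2-colored,
# so check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) returns False.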
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 363 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path, e.g. "encoder.layers.0.attention.k_proj".
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    # Convert a fairseq SEW checkpoint into a transformers checkpoint.
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
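# Example invocation (added for illustration; all paths below are placeholders):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned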
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
) | 256 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
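# Illustrative command for running only these tests (the file path is assumed):
#   RUN_SLOW=1 pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py -k "Integration"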
| 242 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)  # noqa: E741
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
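# Illustrative shape of the return values (placeholder question and passages):
#   question_doc == "question: Why is the sky blue? context: <P> passage one ... <P> passage two ..."
#   support_list == [(article_title, section_title, retrieval_score, passage_text), ...]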
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_A = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_A = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
        question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
        question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
        support_list = []
        for res_d, res_s in zip(support_list_dense, support_list_sparse):
            if tuple(res_d) not in support_list:
                support_list += [tuple(res_d)]
            if tuple(res_s) not in support_list:
                support_list += [tuple(res_s)]
        support_list = support_list[:10]
        question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
    else:
        question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_A = res[1].strip()
if sec_titles == "":
_A = """[{}]({})""".format(res[0], wiki_url)
else:
_A = sec_titles.split(""" & """)
_A = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_A = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 242 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's find the right processor class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
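# Example invocations (added for illustration; the token and sha values are placeholders):
#   python utils/update_metadata.py --token hf_xxx --commit_sha 0123abcd
#   python utils/update_metadata.py --check-only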
| 364 |
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """
    Power iteration: repeatedly apply `input_matrix` to `vector` and renormalize,
    converging to the dominant eigenvalue and its eigenvector.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
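# Quick illustrative check (not in the original file): for the diagonal matrix
# diag(2, 1) and start vector [1, 1], the iteration converges to eigenvalue 2
# with eigenvector proportional to [1, 0]:
#   power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))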
| 9 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
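# For reference (illustrative): one resulting entry maps a TF checkpoint variable
# such as "MobilenetV1/Conv2d_3_depthwise/BatchNorm/gamma" to the corresponding
# PyTorch `normalization.weight` tensor; each of the 13 TF blocks expands into a
# depthwise + pointwise pair in the PyTorch `backbone.layer` list.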
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''' )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + '''/RMSProp''' , None )
        tf_weights.pop(name + '''/RMSProp_1''' , None )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , None )
    logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
    return model
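# Minimal usage sketch (the checkpoint path below is hypothetical):
# model = ...  # a model built from MobileNetVaConfig, as defined further down
# load_tf_weights_in_mobilenet_va(model, model.config, "/path/to/mobilenet_v1.ckpt")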
def apply_tf_padding( features: torch.Tensor , conv_layer: nn.Conv2d ) -> torch.Tensor:
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
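# Sanity check: a 224x224 input through a 3x3 convolution with stride 2 gives
# pad_along_height = pad_along_width = 1, i.e. padding
# (pad_left, pad_right, pad_top, pad_bottom) = (0, 1, 0, 1), matching TF "SAME".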
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a = 1 , _a = 1 , _a = False , _a = True , _a = True , ):
super().__init__()
__a = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
__a = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , stride=_a , padding=_a , groups=_a , bias=_a , padding_mode='''zeros''' , )
if use_normalization:
__a = nn.BatchNormad(
num_features=_a , eps=config.layer_norm_eps , momentum=0.9997 , affine=_a , track_running_stats=_a , )
else:
__a = None
if use_activation:
if isinstance(_a , _a ):
__a = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _a ):
__a = ACTaFN[config.hidden_act]
else:
__a = config.hidden_act
else:
__a = None
def __UpperCAmelCase ( self , _a ):
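        # conv -> (optional) batch norm -> (optional) activation, with TF-style
        # "SAME" padding applied first when config.tf_padding is set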
if self.config.tf_padding:
__a = apply_tf_padding(_a , self.convolution )
__a = self.convolution(_a )
if self.normalization is not None:
__a = self.normalization(_a )
if self.activation is not None:
__a = self.activation(_a )
return features
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = MobileNetVaConfig
__UpperCAmelCase : Optional[int] = load_tf_weights_in_mobilenet_va
__UpperCAmelCase : Optional[Any] = 'mobilenet_v1'
__UpperCAmelCase : Tuple = 'pixel_values'
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowercase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = True ):
super().__init__(_a )
__a = config
__a = 32
__a = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a = MobileNetVaConvLayer(
_a , in_channels=config.num_channels , out_channels=_a , kernel_size=3 , stride=2 , )
__a = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
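        # depthwise strides for the 13 separable blocks; the stem above already
        # downsamples with stride 2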
__a = nn.ModuleList()
for i in range(13 ):
__a = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=3 , stride=strides[i] , groups=_a , ) )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=1 , ) )
__a = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , ):
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
__a = self.conv_stem(_a )
__a = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a = layer_module(_a )
if output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = hidden_states
if self.pooler is not None:
__a = torch.flatten(self.pooler(_a ) , start_dim=1 )
else:
__a = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=_a , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__(_a )
__a = config.num_labels
__a = MobileNetVaModel(_a )
__a = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a = nn.Dropout(config.classifier_dropout_prob , inplace=_a )
__a = nn.Linear(_a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.mobilenet_va(_a , output_hidden_states=_a , return_dict=_a )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier(self.dropout(_a ) )
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = '''single_label_classification'''
else:
__a = '''multi_label_classification'''
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(_a , _a )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , )
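# Minimal inference sketch (assumes the usual transformers Auto classes and the
# checkpoint named above; `image` is a PIL image):
# from transformers import AutoImageProcessor, AutoModelForImageClassification
# processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
# model = AutoModelForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
# logits = model(**processor(images=image, return_tensors="pt")).logits
# print(model.config.id2label[logits.argmax(-1).item()])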
| 45 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
snake_case_ = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor ):
    def __init__( self , *args , **kwargs):
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 214 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
    #                  stream_logs=True)
| 316 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor, tokenizer )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs )
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )
    def batch_decode(self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode(self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
    @property
    def model_input_names(self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
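# Minimal usage sketch (the checkpoint name is an assumption; any repo that
# ships a CLIP image processor plus an XLM-R tokenizer fits):
# from PIL import Image
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# batch = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# batch now holds input_ids / attention_mask plus pixel_values.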
| 316 | 1 |
'''simple docstring'''
def solution(n : int = 1_0_0 ) -> int:
    collect_powers = set()
    limit = n + 1  # maximum limit; a and b both run from 2 to n inclusive
    for a in range(2 , limit ):
        for b in range(2 , limit ):
            current_power = a**b  # calculates the current power
            collect_powers.add(current_power )  # adds the result to the set
    return len(collect_powers )
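# Sanity check (Project Euler 29): for n = 5 there are exactly 15 distinct
# values of a**b with 2 <= a, b <= 5, so solution(5) == 15.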
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 34 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A =logging.get_logger(__name__)
A ={
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _a ( __a ):
__a : List[Any] = """marian"""
__a : Union[str, Any] = ["""past_key_values"""]
__a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] , lowercase : Union[str, Any]=58_101 , lowercase : Tuple=None , lowercase : str=1_024 , lowercase : Optional[int]=12 , lowercase : Optional[int]=4_096 , lowercase : int=16 , lowercase : List[Any]=12 , lowercase : int=4_096 , lowercase : Optional[int]=16 , lowercase : int=0.0 , lowercase : Tuple=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]="gelu" , lowercase : Tuple=1_024 , lowercase : str=0.1 , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.02 , lowercase : Union[str, Any]=58_100 , lowercase : List[str]=False , lowercase : str=58_100 , lowercase : Any=0 , lowercase : Optional[Any]=0 , lowercase : Tuple=True , **lowercase : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase = vocab_size
UpperCAmelCase = decoder_vocab_size or vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = d_model
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , )
class _a ( __a ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def A ( self : int ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase = {0: '''batch'''}
UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase , UpperCAmelCase = self.num_layers
for i in range(lowercase ):
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
UpperCAmelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def A ( self : Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = super().outputs
else:
UpperCAmelCase = super(lowercase , self ).outputs
if self.use_past:
UpperCAmelCase , UpperCAmelCase = self.num_layers
for i in range(lowercase ):
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Generate decoder inputs
UpperCAmelCase = seq_length if not self.use_past else 1
UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase )
UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase = dict(**lowercase , **lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1]
UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
UpperCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase = decoder_seq_length + 3
UpperCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 )
UpperCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase , UpperCAmelCase = self.num_layers
UpperCAmelCase = min(lowercase , lowercase )
UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers
UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase ),
torch.zeros(lowercase ),
torch.zeros(lowercase ),
torch.zeros(lowercase ),
) )
# TODO: test this.
UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase , lowercase ):
common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
return common_inputs
def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase = seqlen + 2
UpperCAmelCase , UpperCAmelCase = self.num_layers
UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
UpperCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase = common_inputs['''attention_mask'''].dtype
UpperCAmelCase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
UpperCAmelCase = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
]
return common_inputs
def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase )
UpperCAmelCase = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) )
return common_inputs
def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
else:
UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
return common_inputs
def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
else:
UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_(
lowercase , lowercase , lowercase , lowercase )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
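# Export sketch (assumed legacy `transformers.onnx` API; names below are
# illustrative, and the seq2seq OnnxConfig defined above is what the exporter
# would consume):
# from pathlib import Path
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# from transformers.onnx import export
# tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# onnx_config = <the OnnxSeqaSeqConfigWithPast subclass above>(model.config, task="seq2seq-lm")
# export(tokenizer, model, onnx_config, opset=13, output=Path("marian.onnx"))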
| 34 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ) -> Any:
_a = 1
_a = 3
_a = (32, 32)
_a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type='''v_prediction''' )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_a = output.images
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCAmelCase , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_a = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type='''v_prediction''' )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_a = output.images
assert image.shape[0] == 2
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_a = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type='''v_prediction''' )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_a = unet.half()
_a = text_encoder.half()
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Dict:
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
_a = '''stabilityai/stable-diffusion-x4-upscaler'''
_a = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_a = '''a cat sitting on a park bench'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
_a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _UpperCAmelCase ( self ) -> Dict:
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
_a = '''stabilityai/stable-diffusion-x4-upscaler'''
_a = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_a = '''a cat sitting on a park bench'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
_a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _UpperCAmelCase ( self ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_a = '''stabilityai/stable-diffusion-x4-upscaler'''
_a = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a = '''a cat sitting on a park bench'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
_a = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 153 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> str:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_multiple_size
_a = hidden_act
_a = hidden_dropout
_a = attention_dropout
_a = weight_tying
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a , _a , _a = self.prepare_config_and_inputs()
_a = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = GPTNeoXJapaneseModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_a = True
_a = GPTNeoXJapaneseModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = True
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
_a = output_from_no_past['''hidden_states'''][0]
_a = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
A_ : Tuple = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
A_ : List[str] = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
A_ : Any = False
A_ : Optional[Any] = False
A_ : Tuple = False
A_ : Optional[int] = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = GPTNeoXJapaneseModelTester(self )
_a = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
# This regression test was failing with PyTorch < 1.3
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
_a = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = '''abeja/gpt-neox-japanese-2.7b'''
_a = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_a = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_a = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
_a = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase )
_a = []
for prompt in prompts:
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' ).input_ids
_a = model.generate(__UpperCAmelCase , max_length=50 )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) | 153 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
    model_type = 'vivit'
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
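# Example (sketch): the defaults above reproduce the ViViT-B/16x2 layout
# (hidden_size=768, 12 layers and heads, 32 frames, 2x16x16 tubelets):
# config = VivitConfig()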
| 181 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( a ) -> None:
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
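# Example: pigeonhole_sort([8, 3, 2, 7, 4, 6, 8]) sorts the list in place to
# [2, 3, 4, 6, 7, 8, 8]; runs in O(n + range) time with O(range) extra space.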
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('''Sorted order is:''' , ''' '''.join(map(str , a ) ) )
if __name__ == "__main__":
main()
| 181 | 1 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
        """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , FutureWarning , )
| 79 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
    '''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_perceiver'''] = ['''PerceiverFeatureExtractor''']
    _import_structure['''image_processing_perceiver'''] = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_perceiver'''] = [
        '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PerceiverForImageClassificationConvProcessing''',
        '''PerceiverForImageClassificationFourier''',
        '''PerceiverForImageClassificationLearned''',
        '''PerceiverForMaskedLM''',
        '''PerceiverForMultimodalAutoencoding''',
        '''PerceiverForOpticalFlow''',
        '''PerceiverForSequenceClassification''',
        '''PerceiverLayer''',
        '''PerceiverModel''',
        '''PerceiverPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 10
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = [1, 2, 3, 4]
UpperCamelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(snake_case__ , self.block_size , 0 ) , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case__ , self.block_size , 0 ) , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case__ , self.block_size , 0 ) , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
UpperCamelCase_ , UpperCamelCase_ = process_story(snake_case__ )
self.assertEqual(snake_case__ , [] )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = ""
UpperCamelCase_ , UpperCamelCase_ = process_story(snake_case__ )
self.assertEqual(snake_case__ , [] )
self.assertEqual(snake_case__ , [] )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
UpperCamelCase_ , UpperCamelCase_ = process_story(snake_case__ )
UpperCamelCase_ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(snake_case__ , snake_case__ )
UpperCamelCase_ = ["It was the best of times."]
self.assertEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = torch.tensor([1, 2, 3, 4] )
UpperCamelCase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(snake_case__ , 0 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case__ , 23 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case__ , 1 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 101
UpperCamelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCamelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCamelCase_ = compute_token_type_ids(snake_case__ , snake_case__ )
np.testing.assert_array_equal(snake_case__ , snake_case__ )
| 128 |
# Imports
import numpy as np
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None ):
'''simple docstring'''
self.set_matricies(red=snake_case__ , green=snake_case__ , blue=snake_case__ , red_edge=snake_case__ , nir=snake_case__ )
def _lowerCamelCase ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None ):
'''simple docstring'''
if red is not None:
UpperCamelCase_ = red
if green is not None:
UpperCamelCase_ = green
if blue is not None:
UpperCamelCase_ = blue
if red_edge is not None:
UpperCamelCase_ = red_edge
if nir is not None:
UpperCamelCase_ = nir
return True
def _lowerCamelCase ( self , snake_case__="" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None ):
'''simple docstring'''
self.set_matricies(red=snake_case__ , green=snake_case__ , blue=snake_case__ , red_edge=snake_case__ , nir=snake_case__ )
UpperCamelCase_ = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def _lowerCamelCase ( self ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def _lowerCamelCase ( self ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def _lowerCamelCase ( self ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _lowerCamelCase ( self , snake_case__=0.08 , snake_case__=1.22 , snake_case__=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir / self.green) - 1
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.nir - self.green
def _lowerCamelCase ( self ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def _lowerCamelCase ( self , snake_case__=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def _lowerCamelCase ( self , snake_case__=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _lowerCamelCase ( self ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _lowerCamelCase ( self , snake_case__=None , snake_case__=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.nir / self.red
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def _lowerCamelCase ( self ):
'''simple docstring'''
max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _lowerCamelCase ( self ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _lowerCamelCase ( self ):
'''simple docstring'''
return self.nir / self.red
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def _lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
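# --- Added illustrative sketch (not part of the original class): the NDVI
# arithmetic above applied directly to hypothetical sample band arrays.
if __name__ == "__main__":
import numpy as np
nir = np.array([0.8, 0.6])
red = np.array([0.1, 0.3])
print((nir - red) / (nir + red))  # per-pixel NDVI -> [0.778, 0.333]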
| 128 | 1 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
def __init__( self ) -> None:
'''Histogram equalization ("constant stretch") of a grayscale image.'''
self.img = ""
self.original_image = ""
self.last_list = []
self.rem = 0
self.L = 256
self.sk = 0
self.k = 0
self.number_of_rows = 0
self.number_of_cols = 0
def stretch( self , input_image ) -> None:
'''Equalize the histogram of the image at ``input_image`` and save the result.'''
self.img = cv2.imread(input_image , 0 )
self.original_image = copy.deepcopy(self.img )
x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
self.k = np.sum(x )
for i in range(len(x ) ):
prk = x[i] / self.k
self.sk += prk
last = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = int(last % last )
last = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(last )
self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
num = self.img[j][i]
if num != self.last_list[num]:
self.img[j][i] = self.last_list[num]
cv2.imwrite("output_data/output.jpg" , self.img )
def plot_histogram( self ) -> None:
'''Plot the histogram of the current image.'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def show_image( self ) -> None:
'''Display the equalized and the original image.'''
cv2.imshow("Output-Image" , self.img )
cv2.imshow("Input-Image" , self.original_image )
cv2.waitKey(5000 )
cv2.destroyAllWindows()
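# --- Added sketch (not from the original file): the same cumulative-histogram
# mapping as ``stretch`` above, shown standalone on a toy histogram.
# s_k = round((levels - 1) * CDF(k)) is the classic equalization transform.
def equalize_lut(hist, levels=256):
total = sum(hist)
lut, cum = [], 0.0
for count in hist:
cum += count / total
lut.append(round((levels - 1) * cum))
return lut
# e.g. equalize_lut([4, 0, 0, 4], levels=4) -> [2, 2, 2, 3]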
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation: list ) -> int:
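"""
Evaluate a postfix (Reverse Polish) expression given as a list of tokens.
>>> evaluate_postfix(["2", "1", "+", "3", "*"])
9
>>> evaluate_postfix(["4", "13", "5", "/", "+"])
6
>>> evaluate_postfix([])
0
"""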
if not postfix_notation:
return 0
operations = {"""+""", """-""", """*""", """/"""}
stack = []
for token in postfix_notation:
if token in operations:
b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 | 0 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
def __init__( self , img , dst_width : int , dst_height : int ) -> None:
'''Nearest-neighbour resize of ``img`` to (dst_width, dst_height).'''
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
self.img = img
self.src_w = img.shape[1]
self.src_h = img.shape[0]
self.dst_w = dst_width
self.dst_h = dst_height
self.ratio_x = self.src_w / self.dst_w
self.ratio_y = self.src_h / self.dst_h
self.output = self.output_img = (
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
)
def process( self ) -> None:
'''Fill ``self.output`` with the nearest source pixel for every destination pixel.'''
for i in range(self.dst_h ):
for j in range(self.dst_w ):
self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
def get_x( self , x : int ) -> int:
'''Map a destination column to the nearest source column.'''
return int(self.ratio_x * x )
def get_y( self , y : int ) -> int:
'''Map a destination row to the nearest source row.'''
return int(self.ratio_y * y )
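# Nearest-neighbour resizing copies source pixel
# (int(i * src_h / dst_h), int(j * src_w / dst_w)) into destination (i, j);
# there is no interpolation, which is fast but blocky when upscaling.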
if __name__ == "__main__":
dst_w , dst_h = 800, 600
im = imread("image_data/lena.jpg", 1)
n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 47 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def setUp( self ) -> None:
transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
tokenizer = AutoTokenizer.from_pretrained(model_name )
self.assertIsNotNone(tokenizer )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(tokenizer ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
tokenizer = AutoTokenizer.from_pretrained(model_name )
self.assertIsNotNone(tokenizer )
self.assertIsInstance(tokenizer , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(tokenizer ) , 0 )
def lowercase_ ( self : List[str] ) -> int:
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : List[str] ) -> Dict:
tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Dict ) -> Any:
config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
self.assertIsInstance(config , RobertaConfig )
# Check that tokenizer_type ≠ model_type
tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' , use_fast=False )
self.assertIsInstance(tokenizer , BertTokenizer )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' , use_fast=False )
self.assertIsInstance(tokenizer , GPT2Tokenizer )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' )
self.assertIsInstance(tokenizer , BertTokenizerFast )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' )
self.assertIsInstance(tokenizer , GPT2TokenizerFast )
def lowercase_ ( self : Union[str, Any] ) -> int:
with pytest.raises(ValueError ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
tokenizer = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
if isinstance(tokenizer , BertTokenizer ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
else:
self.assertEqual(tokenizer.do_lower_case , False )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
EnvironmentError , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
tokenizer = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=False ) , BertTokenizer )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , BertTokenizerFast )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=False )
sample = '''Hello, world. How are you?'''
tokens = tokenizer.tokenize(sample )
self.assertEqual('''[UNK]''' , tokens[0] )
tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=False )
tokens = tokenizer.tokenize(sample )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
tokenizer = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(tokenizer ) , PreTrainedTokenizerFast )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir )
self.assertIsInstance(tokenizer2 , tokenizer.__class__ )
self.assertEqual(tokenizer2.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
tokenizer = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(tokenizer , CTRLTokenizer )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
config = get_tokenizer_config('''bert-base-cased''' )
_ = config.pop('''_commit_hash''' , None )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(config , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
self.assertDictEqual(config , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
config = get_tokenizer_config(tmp_dir )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , CustomConfig )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError ):
AutoTokenizer.register(BertConfig , slow_tokenizer_class=BertTokenizer )
tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
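# Clean the global registries so the registration above does not leak into other tests.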
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , CustomConfig )
# Can register in two steps
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(CustomConfig , fast_tokenizer_class=CustomTokenizerFast )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
CustomConfig , slow_tokenizer_class=CustomTokenizer , fast_tokenizer_class=CustomTokenizerFast )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError ):
AutoTokenizer.register(BertConfig , fast_tokenizer_class=BertTokenizerFast )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER )
bert_tokenizer.save_pretrained(tmp_dir )
tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
self.assertIsInstance(new_tokenizer , CustomTokenizerFast )
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , use_fast=False )
self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError ):
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError ):
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class NewTokenizer(BertTokenizer ):
"""simple docstring"""
special_attribute_present = False
class NewTokenizerFast(BertTokenizerFast ):
"""simple docstring"""
slow_tokenizer_class = NewTokenizer
special_attribute_present = False
try:
AutoConfig.register('''custom''' , CustomConfig )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
tokenizer = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 314 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.hidden_sizes = hidden_sizes
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def create_and_check_model( self , config , pixel_values , labels ):
"""simple docstring"""
model = FocalNetModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
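# Each FocalNet stage halves H and W and doubles the channel dim, hence the
# 4 ** (num_stages - 1) sequence-length divisor and the
# embed_dim * 2 ** (num_stages - 1) final hidden size checked above.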
def create_and_check_backbone( self , config , pixel_values , labels ):
"""simple docstring"""
model = FocalNetBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
config.out_features = None
model = FocalNetBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
"""simple docstring"""
model = FocalNetForMaskedImageModeling(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
config.num_channels = 1
model = FocalNetForMaskedImageModeling(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
"""simple docstring"""
config.num_labels = self.type_sequence_label_size
model = FocalNetForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
config.num_channels = 1
model = FocalNetForImageClassification(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp( self ):
"""simple docstring"""
self.model_tester = FocalNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
def test_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_backbone( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs )
def test_for_masked_image_modeling( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def test_for_image_classification( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def test_feed_forward_chunking( self ):
"""simple docstring"""
pass
def test_model_common_attributes( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
"""simple docstring"""
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ) , expected_num_layers )
# FocalNet has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
reshaped_hidden_states = outputs.reshaped_hidden_states
self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def test_hidden_states_output( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def test_hidden_states_output_with_padding( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FocalNetModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_initialization( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def test_inference_image_classification_head( self ):
"""simple docstring"""
model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(torch_device )
image_processor = self.default_image_processor
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
config_class = FocalNetConfig
has_attentions = False
def setUp( self ):
"""simple docstring"""
self.model_tester = FocalNetModelTester(self )
| 245 |
import itertools
import math
def is_prime(number: int ) -> bool:
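"""Primality test by trial division over 6k +/- 1 candidates.
>>> is_prime(13)
True
>>> is_prime(1)
False
>>> is_prime(25)
False
"""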
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
num = 2
while True:
if is_prime(num ):
yield num
num += 1
def solution(nth: int = 10_001 ) -> int:
return next(itertools.islice(prime_generator() ,nth - 1 ,nth ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 245 | 1 |
from collections import defaultdict
from math import gcd
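# Euclid's formula generates every primitive Pythagorean triple from coprime
# (m, n) with m > n of opposite parity: a = m^2 - n^2, b = 2mn, c = m^2 + n^2,
# so the primitive perimeter is 2m(m + n); its multiples cover all triples.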
def solution(limit: int = 1_500_000 ) -> int:
"""
Count the perimeters below ``limit`` that are shared by exactly one
integer-sided right triangle (Project Euler problem 75).
"""
frequencies: defaultdict = defaultdict(int )
euclid_m = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
if gcd(euclid_m , euclid_n ) > 1:
continue
primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 131 |
'''simple docstring'''
def get_demo_graph(index: int ) -> dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict ) -> list[tuple[int, int]]:
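"""
Return all bridges of an undirected ``graph`` given as an adjacency-list dict.
>>> compute_bridges(get_demo_graph(0))
[(3, 4), (2, 3), (2, 5)]
>>> compute_bridges(get_demo_graph(3))
[]
"""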
id_ = 0
n = len(graph ) # No of vertices in graph
low = [0] * n
visited = [False] * n
def dfs(at , parent , bridges , id_ ):
visited[at] = True
low[at] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(to , at , bridges , id_ )
low[at] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
low[at] = min(low[at] , low[to] )
bridges: list[tuple[int, int]] = []
for i in range(n ):
if not visited[i]:
dfs(i , -1 , bridges , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
config_cls = XGLMConfig
config_updates = {}
hidden_act = """gelu"""
def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = d_model
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.ffn_dim = ffn_dim
self.activation_function = activation_function
self.activation_dropout = activation_dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = None
self.bos_token_id = 0
self.eos_token_id = 2
self.pad_token_id = 1
def get_large_model_config( self ):
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def prepare_config_and_inputs( self ):
input_ids = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
config = self.get_config()
head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def get_config( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
) = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
pipeline_model_mapping = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
test_onnx = False
test_missing_keys = False
test_pruning = False
def setUp( self ):
self.model_tester = TFXGLMModelTester(self )
self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
def test_config( self ):
self.config_tester.run_common_tests()
@slow
def test_model_from_pretrained( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFXGLMModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def test_resize_token_embeddings( self ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def test_lm_generate_xglm( self , verify_outputs=True ):
model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
input_ids = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
def test_xglm_sample( self ):
tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
tokenized = tokenizer('Today is a nice day and' , return_tensors='tf' )
input_ids = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
EXPECTED_OUTPUT_STR = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
def test_batch_generation( self ):
model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
tokenizer.padding_side = 'left'
# use different length sentences to test batching
sentences = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
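# Batched (left-padded) generation must reproduce the per-sentence outputs
# below; right padding would shift positions and change what gets generated.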
inputs = tokenizer(sentences , return_tensors='tf' , padding=True )
input_ids = inputs['input_ids']
outputs = model.generate(input_ids=input_ids , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
inputs_non_padded = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
inputs_padded = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
expected_output_sentence = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(expected_output_sentence , batch_out_sentence )
self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 371 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
'''simple docstring'''
attributes = ["""image_processor""", """tokenizer"""]
image_processor_class = """FlavaImageProcessor"""
tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , FutureWarning , )
feature_extractor = kwargs.pop('feature_extractor' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
if images is not None:
image_features = self.image_processor(
images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
if text is not None and images is not None:
encoding.update(image_features )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def feature_extractor_class( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
return self.image_processor
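# Minimal usage sketch (illustrative; "facebook/flava-full" is one public checkpoint):
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")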
| 38 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
'''simple docstring'''
def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_28 ,max_relative_position=32 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
def UpperCamelCase_ ( self : Tuple ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__A = ids_tensor([self.batch_size] ,self.num_choices )
__A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : int ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : Tuple ):
        __A , __A , __A , __A , __A , __A , __A = self.prepare_config_and_inputs()
__A = True
__A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self : str ,A : Tuple ,A : List[str] ,A : List[Any] ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : Optional[int] ):
__A = NezhaModel(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,token_type_ids=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[int] ,A : Union[str, Any] ,A : Any ,A : Any ,A : Dict ,A : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Union[str, Any] ,):
__A = True
__A = NezhaModel(A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,)
__A = model(
A ,attention_mask=A ,token_type_ids=A ,encoder_hidden_states=A ,)
__A = model(A ,attention_mask=A ,token_type_ids=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : int ,A : int ,A : Any ,A : List[str] ,A : Tuple ,A : Optional[int] ,A : Any ,A : Union[str, Any] ):
__A = NezhaForMaskedLM(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Tuple ,A : Union[str, Any] ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ):
__A = NezhaForNextSentencePrediction(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : Dict ,A : int ,A : List[Any] ,A : Optional[Any] ,A : Union[str, Any] ,A : Any ,A : Tuple ,A : Any ):
__A = NezhaForPreTraining(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,next_sentence_label=A ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : Optional[int] ,A : int ,A : Any ,A : Optional[Any] ,A : Tuple ,A : Optional[Any] ,A : List[Any] ,A : List[Any] ):
__A = NezhaForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : List[Any] ,A : Union[str, Any] ,A : Tuple ,A : Tuple ,A : List[str] ):
__A = self.num_labels
__A = NezhaForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ,A : Union[str, Any] ,A : Tuple ,A : List[str] ,A : Union[str, Any] ,A : Optional[Any] ):
__A = self.num_labels
__A = NezhaForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[str] ,A : Any ,A : str ,A : Optional[int] ,A : Tuple ,A : Union[str, Any] ,A : Any ):
__A = self.num_choices
__A = NezhaForMultipleChoice(config=A )
model.to(A )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Dict ):
__A = self.prepare_config_and_inputs()
        __A , __A , __A , __A , __A , __A , __A = config_and_inputs
__A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def UpperCamelCase_ ( self : Tuple ,A : Any ,A : Union[str, Any] ,A : int=False ):
__A = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class in get_values(A ):
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
return inputs_dict
def UpperCamelCase_ ( self : int ):
__A = NezhaModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A )
def UpperCamelCase_ ( self : Dict ):
# This regression test was failing with PyTorch < 1.3
        __A , __A , __A , __A , __A , __A , __A , __A , __A = self.model_tester.prepare_config_and_inputs_for_decoder()
__A = None
self.model_tester.create_and_check_model_as_decoder(
A ,A ,A ,A ,A ,A ,A ,A ,A ,)
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*A )
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def UpperCamelCase_ ( self : int ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = NezhaModel.from_pretrained(A )
self.assertIsNotNone(A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : Tuple ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__A = True
__A = model_class(config=A )
__A = self._prepare_for_class(A ,A )
__A = torch.jit.trace(
A ,(inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A ,os.path.join(A ,"bert.pt" ) )
__A = torch.jit.load(os.path.join(A ,"bert.pt" ) ,map_location=A )
loaded(inputs_dict["input_ids"].to(A ) ,inputs_dict["attention_mask"].to(A ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : List[Any] ):
__A = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
__A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__A = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__A = model(A ,attention_mask=A )[0]
__A = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape ,A )
__A = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : str ):
__A = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
__A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__A = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__A = model(A ,attention_mask=A )[0]
__A = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape ,A )
__A = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A ,atol=1E-4 ) )
| 15 |
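The tester above fabricates its inputs with helpers such as `ids_tensor` and `random_attention_mask`. A plausible minimal version of those helpers (a sketch of the pattern, not the actual transformers test utilities):

```python
import torch

def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size), shaped like real input_ids.
    return torch.randint(0, vocab_size, shape, dtype=torch.long)

def random_attention_mask(shape):
    # Random 0/1 mask; pin the first position to 1 so no row is fully masked.
    mask = torch.randint(0, 2, shape, dtype=torch.long)
    mask[:, 0] = 1
    return mask

input_ids = ids_tensor([13, 7], vocab_size=99)   # (batch_size, seq_length)
attention_mask = random_attention_mask([13, 7])
```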
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase ( *lowercase__ , lowercase__ = None , lowercase__=True , lowercase__=2 ):
from .. import __version__
__SCREAMING_SNAKE_CASE : Optional[Any] = take_from
__SCREAMING_SNAKE_CASE : List[str] = ()
if not isinstance(args[0] , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase__ ).base_version ) >= version.parse(lowercase__ ):
raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if isinstance(lowercase__ , lowercase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase__ ),)
__SCREAMING_SNAKE_CASE : List[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(lowercase__ , lowercase__ ):
values += (getattr(lowercase__ , lowercase__ ),)
__SCREAMING_SNAKE_CASE : List[str] = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__SCREAMING_SNAKE_CASE : str = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__SCREAMING_SNAKE_CASE : Any = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , lowercase__ , stacklevel=lowercase__ )
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
__SCREAMING_SNAKE_CASE : Dict = call_frame.filename
__SCREAMING_SNAKE_CASE : Optional[Any] = call_frame.lineno
__SCREAMING_SNAKE_CASE : int = call_frame.function
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(lowercase__ ) == 0:
return
elif len(lowercase__ ) == 1:
return values[0]
return values
| 9 | 0 |
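The helper above is a deprecation shim: it pops deprecated kwargs (or reads deprecated attributes), warns, and raises once the library version reaches the removal version. A stripped-down sketch of the kwargs half of that pattern, with hypothetical names and no version gating:

```python
import warnings

def deprecate_kwarg(name, removal_version, message, kwargs):
    # Pop a deprecated keyword argument and warn; return its value (or None)
    # so the call site can fall back to the new argument.
    if name not in kwargs:
        return None
    warnings.warn(
        f"`{name}` is deprecated and will be removed in version {removal_version}. {message}",
        FutureWarning,
        stacklevel=2,
    )
    return kwargs.pop(name)

def resize(image=None, **kwargs):
    old = deprecate_kwarg("img", "1.0.0", "Use `image` instead.", kwargs)
    return image if image is not None else old

print(resize(img="x"))  # warns, then returns "x"
```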
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Any:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="""utf-8""" , check=_snake_case , )
assert hasattr(self , """env""" )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Any:
lowerCamelCase_ =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
lowerCamelCase_ ={"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_snake_case , instance_count=_snake_case , instance_type=self.instance_type , debugger_hook_config=_snake_case , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_snake_case , py_version="""py36""" , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
TrainingJobAnalytics(_snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
lowerCamelCase_ =self.create_estimator(_snake_case )
# run training
estimator.fit()
# result dataframe
lowerCamelCase_ =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase_ =list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase_ =list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase_ =(
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _snake_case )
| 357 |
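The KPI assertions above reduce to filtering a two-column metrics dataframe by `metric_name`. The same filtering, sketched on hand-made data:

```python
import pandas as pd

result_metrics_df = pd.DataFrame({
    "metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
    "value": [0.74, 0.55, 0.72],
})
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
assert all(t >= 0.7 for t in eval_accuracy)
assert all(t <= 0.6 for t in eval_loss)
```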
from math import pi, sqrt
def __UpperCamelCase ( _A : float ) ->float:
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_7_1.5:
raise OverflowError("""math range error""" )
elif num - int(_A ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __UpperCamelCase ( ) ->None:
"""simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__A : List[Any] = 1.0
while num:
__A : str = float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 49 | 0 |
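Because only integers and half-integers are accepted, the recurrence gamma(num) = (num - 1) * gamma(num - 1) always bottoms out at gamma(1) = 1 or gamma(0.5) = sqrt(pi). One unrolled case as a sanity check:

```python
from math import isclose, pi, sqrt

# gamma(3.5) unrolls to 2.5 * 1.5 * 0.5 * gamma(0.5) = 1.875 * sqrt(pi)
assert isclose(2.5 * 1.5 * 0.5 * sqrt(pi), 3.3233509704478426)
```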
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _A (lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
_a = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def _A (lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
_a , _a = emb.weight.shape
_a = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
_a = emb.weight.data
return lin_layer
def _A (lowerCAmelCase__ :List[Any] ) -> int:
'''simple docstring'''
_a = torch.load(lowerCAmelCase__ , map_location='cpu' )
_a = Namespace(**checkpoint['cfg']['model'] )
_a = checkpoint['model']
remove_ignore_keys_(lowerCAmelCase__ )
_a = state_dict['decoder.embed_tokens.weight'].shape[0]
_a = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
_a = XGLMConfig(
vocab_size=lowerCAmelCase__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_a = XGLMForCausalLM(lowerCAmelCase__ )
_a = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
print(lowerCAmelCase__ )
_a = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
a_ : str = parser.parse_args()
a_ : Optional[int] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 168 |
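The `make_linear_from_emb` step above ties the language-model head to the input embedding matrix: a bias-free `nn.Linear` whose weight is the embedding table. The trick in isolation:

```python
import torch
from torch import nn

emb = nn.Embedding(32, 8)                 # (vocab_size, d_model)
vocab_size, d_model = emb.weight.shape
lm_head = nn.Linear(d_model, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data     # share the embedding table

hidden = torch.randn(1, 4, 8)             # (batch, seq, d_model)
logits = lm_head(hidden)                  # (1, 4, 32): one score per vocab entry
```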
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = (KDPMaDiscreteScheduler,)
_lowerCAmelCase = 1_0
def __UpperCAmelCase ( self , **__magic_name__ ) -> int:
_a = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**__magic_name__ )
return config
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__magic_name__ )
def __UpperCAmelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__magic_name__ )
def __UpperCAmelCase ( self ) -> int:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='v_prediction' )
_a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(self.num_inference_steps )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(__magic_name__ , __magic_name__ )
_a = model(__magic_name__ , __magic_name__ )
_a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ )
_a = output.prev_sample
_a = torch.sum(torch.abs(__magic_name__ ) )
_a = torch.mean(torch.abs(__magic_name__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __UpperCAmelCase ( self ) -> Tuple:
if torch_device == "mps":
return
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(self.num_inference_steps )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(__magic_name__ , __magic_name__ )
_a = model(__magic_name__ , __magic_name__ )
_a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ )
_a = output.prev_sample
_a = torch.sum(torch.abs(__magic_name__ ) )
_a = torch.mean(torch.abs(__magic_name__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __UpperCAmelCase ( self ) -> List[Any]:
if torch_device == "mps":
return
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(self.num_inference_steps , device=__magic_name__ )
_a = self.dummy_model()
_a = self.dummy_sample_deter.to(__magic_name__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_a = scheduler.scale_model_input(__magic_name__ , __magic_name__ )
_a = model(__magic_name__ , __magic_name__ )
_a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ )
_a = output.prev_sample
_a = torch.sum(torch.abs(__magic_name__ ) )
_a = torch.mean(torch.abs(__magic_name__ ) )
if str(__magic_name__ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
| 168 | 1 |
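Every test above runs the same three-step denoising loop: scale the sample for the current timestep, predict with the model, then `step` back to the previous sample. With diffusers installed, the loop shape looks like this (the zero-returning `dummy_model` is a stand-in for a real noise-prediction UNet):

```python
import torch
from diffusers import KDPMaDiscreteScheduler

scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1100)
scheduler.set_timesteps(10)

def dummy_model(sample, t):
    return torch.zeros_like(sample)  # a real pipeline would call a UNet here

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = dummy_model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```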
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=[1, 16, 4, 4] , _a=None , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = scope
lowerCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCamelCase = (self.image_size // 32) ** 2
lowerCamelCase = num_patches + 1
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_a , )
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = ViTHybridModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = self.type_sequence_label_size
lowerCamelCase = ViTHybridForImageClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ViTHybridModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(_a )
lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = _config_zero_init(_a )
for model_class in self.all_model_classes:
lowerCamelCase = model_class(config=_a )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCamelCase = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = ViTHybridModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def a__ ( ) -> Union[str, Any]:
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_a )
lowerCamelCase = self.default_image_processor
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
lowerCamelCase = model(**_a )
# verify the logits
lowerCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
lowerCamelCase = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
@require_accelerate
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCamelCase = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" )
lowerCamelCase = model(**_a )
lowerCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCamelCase = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 353 |
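The integration tests above follow the standard image-classification recipe: preprocess with the image processor, forward pass under `no_grad`, argmax the logits into `id2label`. Outside a test the same flow reads as below (checkpoint name taken from the test itself; assumes network access and a local `cat.png`):

```python
import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")

image = Image.open("cat.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```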
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *_a , _a=None , _a=None , **_a ):
"""simple docstring"""
super().__init__(*_a , **_a )
lowerCamelCase = eval_examples
lowerCamelCase = post_process_function
def _lowerCAmelCase ( self , _a=None , _a=None , _a=None , _a = "eval" ):
"""simple docstring"""
lowerCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase = self.get_eval_dataloader(_a )
lowerCamelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase = self.compute_metrics
lowerCamelCase = None
lowerCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCamelCase = time.time()
try:
lowerCamelCase = eval_loop(
_a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
lowerCamelCase = compute_metrics
lowerCamelCase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase = self.post_process_function(_a , _a , output.predictions )
lowerCamelCase = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase = metrics.pop(_a )
metrics.update(output.metrics )
else:
lowerCamelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a )
return metrics
def _lowerCAmelCase ( self , _a , _a , _a=None , _a = "test" ):
"""simple docstring"""
lowerCamelCase = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase = self.compute_metrics
lowerCamelCase = None
lowerCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCamelCase = time.time()
try:
lowerCamelCase = eval_loop(
_a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
lowerCamelCase = compute_metrics
lowerCamelCase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase = self.post_process_function(_a , _a , output.predictions , """predict""" )
lowerCamelCase = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
| 168 | 0 |
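Both methods above rename metric keys so everything carries the `eval_`/`test_` prefix before logging. The renaming idiom on its own:

```python
metrics = {"exact_match": 81.2, "f1": 88.9, "eval_samples": 100}
metric_key_prefix = "eval"
for key in list(metrics.keys()):          # snapshot the keys: the dict mutates in the loop
    if not key.startswith(f"{metric_key_prefix}_"):
        metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
print(metrics)  # {'eval_samples': 100, 'eval_exact_match': 81.2, 'eval_f1': 88.9}
```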
def __A ( __lowerCamelCase = 10 ) -> str:
    if not isinstance(__lowerCamelCase , int ) or __lowerCamelCase < 0:
        raise ValueError("""Invalid input""" )
    modulus = 10**__lowerCamelCase
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(10) = }')
| 228 |
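The heavy lifting here is three-argument `pow`, which does modular exponentiation by repeated squaring instead of materializing 2**7830457. A hand-rolled version of the same idea:

```python
def pow_mod(base, exp, mod):
    # Square-and-multiply: O(log exp) multiplications, intermediates stay below mod**2.
    result = 1
    base %= mod
    while exp:
        if exp & 1:
            result = result * base % mod
        base = base * base % mod
        exp >>= 1
    return result

assert pow_mod(2, 783_0457, 10**10) == pow(2, 783_0457, 10**10)
```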
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Dict[Optional[str], Type[Formatter]] = {}
__UpperCamelCase : Dict[Optional[str], str] = {}
__UpperCamelCase : Dict[Optional[str], Exception] = {}
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , ) -> Optional[int]:
a = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
a = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
a = format_type
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ) -> List[str]:
a = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
a = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
__UpperCamelCase : str = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
__UpperCamelCase : List[str] = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
__UpperCamelCase : List[str] = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def __A ( __lowerCamelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __A ( __lowerCamelCase , **__lowerCamelCase ) -> Formatter:
a = get_format_type_from_alias(__lowerCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__lowerCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
| 228 | 1 |
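Net effect of the registry above: aliases resolve to a canonical format type, which maps to a formatter class that is instantiated on demand. The same resolve-then-construct flow in a toy registry (hypothetical classes, not the datasets API):

```python
_FORMAT_TYPES = {}
_ALIASES = {}

def register(formatter_cls, name, aliases=()):
    _FORMAT_TYPES[name] = formatter_cls
    for alias in set(list(aliases) + [name]):
        _ALIASES[alias] = name

class NumpyFormatter:  # stand-in formatter
    pass

register(NumpyFormatter, "numpy", aliases=["np"])

def get_formatter(format_type, **kwargs):
    format_type = _ALIASES.get(format_type, format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**kwargs)
    raise ValueError(f"Unknown format type {format_type!r}")

assert isinstance(get_formatter("np"), NumpyFormatter)
```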
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = False ,__UpperCAmelCase = False ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> Optional[int]:
super().__init__(
features=__UpperCAmelCase ,cache_dir=__UpperCAmelCase ,keep_in_memory=__UpperCAmelCase ,streaming=__UpperCAmelCase ,num_proc=__UpperCAmelCase ,**__UpperCAmelCase ,)
lowerCAmelCase__ : List[Any] = Generator(
cache_dir=__UpperCAmelCase ,features=__UpperCAmelCase ,generator=__UpperCAmelCase ,gen_kwargs=__UpperCAmelCase ,**__UpperCAmelCase ,)
def UpperCAmelCase_ ( self ) -> Dict:
# Build iterable dataset
if self.streaming:
lowerCAmelCase__ : Optional[int] = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Optional[Any] = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase ,download_mode=__UpperCAmelCase ,verification_mode=__UpperCAmelCase ,base_path=__UpperCAmelCase ,num_proc=self.num_proc ,)
lowerCAmelCase__ : Optional[Any] = self.builder.as_dataset(
split="""train""" ,verification_mode=__UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
| 369 |
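This reader is the machinery behind `Dataset.from_generator`. With the `datasets` library installed, the user-facing call is roughly:

```python
from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}
```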
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
_lowerCAmelCase = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
_lowerCAmelCase = {
'''ctrl''': 256,
}
_lowerCAmelCase = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Dict = set()
lowerCAmelCase__ : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ : str = char
lowerCAmelCase__ : int = set(UpperCamelCase )
return pairs
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : str = CONTROL_CODES
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,**__UpperCAmelCase ) -> Optional[Any]:
super().__init__(unk_token=__UpperCAmelCase ,**__UpperCAmelCase )
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase__ : List[Any] = json.load(__UpperCAmelCase )
lowerCAmelCase__ : str = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : Any = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ : Tuple = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : int = {}
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return len(self.encoder )
def UpperCAmelCase_ ( self ) -> Optional[int]:
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ : int = tuple(__UpperCAmelCase )
lowerCAmelCase__ : str = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
lowerCAmelCase__ : Tuple = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ : Tuple = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = bigram
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : int = 0
while i < len(__UpperCAmelCase ):
try:
lowerCAmelCase__ : Any = word.index(__UpperCAmelCase ,__UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ : Any = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ : Any = tuple(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
lowerCAmelCase__ : List[str] = get_pairs(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = """@@ """.join(__UpperCAmelCase )
lowerCAmelCase__ : int = word[:-4]
lowerCAmelCase__ : Optional[Any] = word
return word
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : Optional[int] = re.findall(R"""\S+\n?""" ,__UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
return self.decoder.get(__UpperCAmelCase ,self.unk_token )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[int] = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip()
return out_string
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Tuple = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : str = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__UpperCAmelCase ,ensure_ascii=__UpperCAmelCase ) + """\n""" )
lowerCAmelCase__ : Optional[int] = 0
with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase__ : List[str] = token_index
writer.write(""" """.join(__UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 184 | 0 |
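The `get_pairs` helper plus the merge loop above is textbook BPE: repeatedly fuse the adjacent pair with the lowest merge rank until no ranked pair remains. The pair-extraction step on a toy word:

```python
def get_pairs(word):
    # All adjacent symbol pairs, e.g. ("l", "o", "w") -> {("l", "o"), ("o", "w")}
    pairs = set()
    prev = word[0]
    for ch in word[1:]:
        pairs.add((prev, ch))
        prev = ch
    return pairs

word = ("l", "o", "w", "e", "r", "</w>")
print(sorted(get_pairs(word)))
# [('e', 'r'), ('l', 'o'), ('o', 'w'), ('r', '</w>'), ('w', 'e')]
```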