| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
from __future__ import annotations


def all_unique(values: list[int]) -> bool:
    """Return True if every element of ``values`` occurs exactly once.

    >>> all_unique([1, 2, 3, 4])
    True
    >>> all_unique([1, 2, 2, 4])
    False
    """
    return len(set(values)) == len(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 339 |
import math
import unittest


def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime by trial division in O(sqrt(n))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 339 | 1 |
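# A minimal, standalone companion sketch (function and variable names here are
# illustrative, not from the row above). It shows why the 6k +/- 1 stride used by
# is_prime suffices: every integer is 6k + r with r in {0, 1, 2, 3, 4, 5}, and
# r in {0, 2, 3, 4} is divisible by 2 or 3, so any prime > 3 is 6k - 1 or 6k + 1.
def primes_up_to(limit):
    """Collect primes by trial division, visiting only candidates of the form 6k +/- 1."""
    if limit < 2:
        return []
    primes = [2, 3] if limit >= 3 else [2]
    candidate = 5
    step = 2  # alternates 2, 4 to hit 5, 7, 11, 13, 17, 19, ...
    while candidate <= limit:
        if all(candidate % p for p in primes):
            primes.append(candidate)
        candidate += step
        step = 6 - step
    return primes


assert primes_up_to(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]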
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 264 |
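# A compact standalone sketch of the same idea without OpenCV: classic histogram
# equalization maps each gray level through the normalized cumulative histogram
# (the CDF), which is what ConstantStretch accumulates in last_list. Names below
# are illustrative, not taken from the row above.
import numpy as np


def equalize_histogram(image):
    """Equalize an 8-bit grayscale image via its cumulative histogram."""
    hist, _ = np.histogram(image.ravel(), bins=256, range=(0, 256))
    cdf = hist.cumsum() / hist.sum()            # normalized cumulative histogram
    lut = np.round(255 * cdf).astype(np.uint8)  # gray-level lookup table
    return lut[image]


demo = np.array([[50, 50, 100], [100, 150, 200]], dtype=np.uint8)
print(equalize_histogram(demo))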
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 264 | 1 |
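# A small standalone illustration (with made-up piece ids) of the fairseq/SentencePiece
# alignment handled above: spm reserves id 0 for "<unk>", while fairseq prepends "<s>"
# and "<pad>", so every real piece id is shifted by a constant offset of 1 and the
# first four ids are hard-coded.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def convert_piece_id(token, spm_piece_id):
    """Mirror of the id-conversion rule: special tokens are fixed, others are shifted."""
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    # spm returns 0 for unknown pieces, which must map to the unk id (3), not to id 1
    return spm_piece_id + fairseq_offset if spm_piece_id else fairseq_tokens_to_ids["<unk>"]


assert convert_piece_id("<pad>", 99) == 1
assert convert_piece_id(",", 3) == 4   # "," is piece 3 in spm, id 4 on the fairseq side
assert convert_piece_id("zzz", 0) == 3  # unknown piece falls back to "<unk>"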
"""simple docstring"""
def lowercase (snake_case__ : int ) -> bool:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case__ )
if number < 0:
return False
lowerCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 |
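# Quick standalone check of the digit-matching loop above: a number is automorphic
# exactly when its square ends in the number itself (5 -> 25, 6 -> 36, 76 -> 5776, ...).
for n in (5, 6, 25, 76, 376):
    assert str(n * n).endswith(str(n)), n
for n in (2, 3, 10, 11):
    assert not str(n * n).endswith(str(n)), n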
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train and eval dataloaders for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 155 | 1 |
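# A toy, single-process sketch of what LocalSGD does under the hood (all names below are
# illustrative, not from the accelerate API): each replica takes K independent optimizer
# steps, then the replicas' parameters are averaged, trading a little staleness for far
# fewer synchronizations than all-reducing gradients on every step.
import torch


def average_parameters(replicas):
    """In-place parameter averaging across model replicas (stand-in for an all-reduce)."""
    with torch.no_grad():
        for params in zip(*(m.parameters() for m in replicas)):
            mean = torch.stack(params).mean(dim=0)
            for p in params:
                p.copy_(mean)


workers = [torch.nn.Linear(4, 1) for _ in range(2)]
local_sgd_steps = 8
for step in range(32):
    for model in workers:  # each replica would see its own shard of data here
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        for p in model.parameters():
            with torch.no_grad():
                p -= 0.01 * p.grad
            p.grad = None
    if (step + 1) % local_sgd_steps == 0:  # synchronize every K steps
        average_parameters(workers)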
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
_enforce_args(__UpperCAmelCase , __UpperCAmelCase )
if n == 0:
return 0
lowercase__: int = float('''-inf''' )
for i in range(1 , n + 1 ):
lowercase__: Optional[int] = max(
__UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , __UpperCAmelCase ) )
return max_revue
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_enforce_args(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[Any] = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowercase__: int = float('''-inf''' )
for i in range(1 , n + 1 ):
lowercase__: Union[str, Any] = max(
__UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __UpperCAmelCase , __UpperCAmelCase ) , )
lowercase__: Any = max_revenue
return max_rev[n]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
_enforce_args(__UpperCAmelCase , __UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowercase__: int = [float('''-inf''' ) for _ in range(n + 1 )]
lowercase__: Union[str, Any] = 0
for i in range(1 , n + 1 ):
lowercase__: Union[str, Any] = max_rev[i]
for j in range(1 , i + 1 ):
lowercase__: List[Any] = max(__UpperCAmelCase , prices[j - 1] + max_rev[i - j] )
lowercase__: Optional[int] = max_revenue_i
return max_rev[n]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if n < 0:
lowercase__: List[Any] = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__UpperCAmelCase )
if n > len(__UpperCAmelCase ):
lowercase__: str = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(__UpperCAmelCase )}"""
)
raise ValueError(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
lowercase__: List[str] = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
lowercase__: Optional[int] = len(__UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowercase__: Tuple = 3_6
lowercase__: List[str] = top_down_cut_rod(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Union[str, Any] = bottom_up_cut_rod(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[int] = naive_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 2 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 2 | 1 |
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Möbius function mu(n): 0 if n is not square-free, otherwise (-1)**k for k prime factors."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    factors = prime_factors(number)
    if len(set(factors)) != len(factors):
        # a repeated prime factor means the number is not square-free
        return 0
    return -1 if len(factors) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 276 |
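# A standalone cross-check of the Möbius rule above (the factorization helper is written
# inline so this sketch does not depend on maths.prime_factors): mu(n) is 0 when n has a
# squared prime factor, and otherwise (-1)**k for k distinct prime factors.
def mobius_reference(n):
    factors = []
    d, m = 2, n
    while d * d <= m:
        while m % d == 0:
            factors.append(d)
            m //= d
        d += 1
    if m > 1:
        factors.append(m)
    if len(set(factors)) != len(factors):  # repeated prime => not square-free
        return 0
    return -1 if len(factors) % 2 else 1


assert [mobius_reference(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]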
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 276 | 1 |
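# A tiny standalone sketch of the vector-quantization step that self.quantize performs
# between the encoder and decoder above: each latent vector is snapped to its nearest
# codebook entry (names and sizes below are illustrative).
import torch

codebook = torch.randn(256, 4)               # num_vq_embeddings x vq_embed_dim
latents = torch.randn(8, 4)                  # a batch of encoder outputs, flattened
distances = torch.cdist(latents, codebook)   # pairwise L2 distances
indices = distances.argmin(dim=1)            # nearest codebook entry per latent
quantized = codebook[indices]                # the decoder only ever sees codebook vectors
assert quantized.shape == latents.shape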
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 282 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 282 | 1 |
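# A standalone arithmetic check of the dummy past_key_values shapes built above: each
# cached key/value tensor is (batch, num_heads, past_sequence_length, head_dim), with
# head_dim = hidden_size // num_heads (values below are bart-large-like, for illustration).
batch, hidden_size, num_heads = 2, 1024, 16
decoder_seq_length = 1                       # with use_past, the decoder feeds one new token
decoder_past_length = decoder_seq_length + 3
decoder_shape = (batch, num_heads, decoder_past_length, hidden_size // num_heads)
assert decoder_shape == (2, 16, 4, 64)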
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 1 |
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references, i.e. the height of this node."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a node height with geometric distribution: promote with probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level

    def _locate_node(self, key):
        """Return the node with the given key (or None) plus the update vector."""
        # Nodes that refer, or should refer, to the output node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(A__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(A__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _A ( ):
"""simple docstring"""
def is_sorted(A__ ):
return all(next_item >= item for item, next_item in zip(A__ , lst[1:] ) )
__lowercase = SkipList()
for i in range(10 ):
skip_list.insert(A__ , A__ )
assert is_sorted(list(A__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(A__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(A__ ) )
def _A ( ):
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104 | 0 |
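The skip-list sample above is stored with every assignment target collapsed to a throwaway name (`__lowercase = ...`), so it does not run as written. As a minimal runnable sketch of the same idea, here is the classic formulation with a fixed-size pointer tower per node instead of the sample's growable forward lists; the names (`MiniSkipList`, `SkipNode`) are illustrative, not from the sample:

import random

class SkipNode:
    def __init__(self, key, value, level):
        self.key, self.value = key, value
        self.forward = [None] * level  # tower of forward pointers

class MiniSkipList:
    def __init__(self, p=0.5, max_level=16):
        self.p, self.max_level = p, max_level
        self.head = SkipNode(None, None, max_level)
        self.level = 1

    def _random_level(self):
        level = 1
        while random.random() < self.p and level < self.max_level:
            level += 1
        return level

    def _descend(self, key):
        # Rightmost node with key < `key` at every level, top to bottom.
        update, node = [None] * self.max_level, self.head
        for i in reversed(range(self.level)):
            while node.forward[i] is not None and node.forward[i].key < key:
                node = node.forward[i]
            update[i] = node
        return node, update

    def insert(self, key, value):
        node, update = self._descend(key)
        nxt = node.forward[0]
        if nxt is not None and nxt.key == key:  # existing key: overwrite
            nxt.value = value
            return
        level = self._random_level()
        if level > self.level:  # new levels start from the head
            for i in range(self.level, level):
                update[i] = self.head
            self.level = level
        new = SkipNode(key, value, level)
        for i in range(level):  # splice the new node in at each level
            new.forward[i] = update[i].forward[i]
            update[i].forward[i] = new

    def find(self, key):
        node, _ = self._descend(key)
        nxt = node.forward[0]
        return nxt.value if nxt is not None and nxt.key == key else None

sl = MiniSkipList()
for k, v in [("Key1", 3), ("Key2", 12), ("Key1", 10)]:
    sl.insert(k, v)
assert sl.find("Key1") == 10 and sl.find("Key2") == 12
assert sl.find("missing") is None

The expected O(log n) behaviour comes from each level skipping roughly half of the nodes of the level below it.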
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] ="""informer"""
a_ : Dict ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "student_t" , UpperCamelCase : str = "nll" , UpperCamelCase : int = 1 , UpperCamelCase : List[int] = None , UpperCamelCase : Optional[Union[str, bool]] = "mean" , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : int = 64 , UpperCamelCase : int = 32 , UpperCamelCase : int = 32 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : bool = True , UpperCamelCase : str = "gelu" , UpperCamelCase : float = 0.05 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : int = 1_00 , UpperCamelCase : float = 0.02 , UpperCamelCase : Tuple=True , UpperCamelCase : str = "prob" , UpperCamelCase : int = 5 , UpperCamelCase : bool = True , **UpperCamelCase : List[str] , ):
'''simple docstring'''
_snake_case : List[Any] = prediction_length
_snake_case : List[Any] = context_length or prediction_length
_snake_case : Dict = distribution_output
_snake_case : int = loss
_snake_case : Dict = input_size
_snake_case : int = num_time_features
_snake_case : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_snake_case : Optional[Any] = scaling
_snake_case : Any = num_dynamic_real_features
_snake_case : str = num_static_real_features
_snake_case : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_snake_case : Tuple = cardinality
else:
_snake_case : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_snake_case : int = embedding_dimension
else:
_snake_case : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_snake_case : int = num_parallel_samples
# Transformer architecture configuration
_snake_case : Dict = input_size * len(self.lags_sequence ) + self._number_of_features
_snake_case : Union[str, Any] = d_model
_snake_case : int = encoder_attention_heads
_snake_case : Optional[Any] = decoder_attention_heads
_snake_case : int = encoder_ffn_dim
_snake_case : Dict = decoder_ffn_dim
_snake_case : str = encoder_layers
_snake_case : List[str] = decoder_layers
_snake_case : Optional[Any] = dropout
_snake_case : int = attention_dropout
_snake_case : int = activation_dropout
_snake_case : str = encoder_layerdrop
_snake_case : List[str] = decoder_layerdrop
_snake_case : Dict = activation_function
_snake_case : Tuple = init_std
_snake_case : Dict = use_cache
# Informer
_snake_case : List[Any] = attention_type
_snake_case : Any = sampling_factor
_snake_case : Dict = distil
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 369 |
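As a sanity check of the derived encoder width in the Informer config above, assume the all-default arguments (input_size=1, the seven default lags, and zero time, dynamic, and static features); `_number_of_features` then reduces to the two loc/scale features:

input_size, lags_sequence = 1, [1, 2, 3, 4, 5, 6, 7]
# embeddings + dynamic + time + static_real + the log1p(abs(loc)) and log(scale) features
number_of_features = 0 + 0 + 0 + 0 + input_size * 2
assert input_size * len(lags_sequence) + number_of_features == 9  # feature_size fed into the model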
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: int , lowerCAmelCase: List[Any] )-> Dict:
# Initialise PyTorch model
_snake_case : Dict = RemBertConfig.from_json_file(lowerCAmelCase )
print('Building PyTorch model from configuration: {}'.format(str(lowerCAmelCase ) ) )
_snake_case : Optional[Any] = RemBertModel(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowerCAmelCase ) )
torch.save(model.state_dict() , lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 260 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class snake_case_:
__UpperCamelCase = LEDConfig
__UpperCamelCase = {}
__UpperCamelCase = '''gelu'''
def __init__( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : List[Any]=9_9 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : str=2_0 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Any=4 , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : int = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Union[str, Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : Any = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : List[str] = eos_token_id
lowerCAmelCase : Optional[int] = pad_token_id
lowerCAmelCase : str = bos_token_id
lowerCAmelCase : List[str] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase : Optional[Any] = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase : Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCAmelCase : List[Any] = prepare_led_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[Any] = tf.concat(
[tf.zeros_like(UpperCamelCase_ )[:, :-1], tf.ones_like(UpperCamelCase_ )[:, -1:]] , axis=-1 , )
lowerCAmelCase : Any = global_attention_mask
return config, inputs_dict
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[str] ):
lowerCAmelCase : str = TFLEDModel(config=UpperCamelCase_ ).get_decoder()
lowerCAmelCase : int = inputs_dict['''input_ids''']
lowerCAmelCase : Optional[int] = input_ids[:1, :]
lowerCAmelCase : Optional[int] = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase : List[Any] = 1
# first forward pass
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : str=None , _snake_case : Optional[Any]=None , _snake_case : Union[str, Any]=None , _snake_case : Union[str, Any]=None , ):
if attention_mask is None:
lowerCAmelCase : Tuple = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = TFLEDModelTester(self )
lowerCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : str = tf.zeros_like(inputs_dict['''attention_mask'''] )
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : int = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
lowerCAmelCase : int = True
lowerCAmelCase : Tuple = self.model_tester.seq_length
lowerCAmelCase : Optional[int] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase : List[str] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = True
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : str = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
if self.is_encoder_decoder:
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_decoder_attentions_output(UpperCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
# Check attention is always last and order is fine
lowerCAmelCase : List[str] = True
lowerCAmelCase : Dict = True
lowerCAmelCase : str = model_class(UpperCamelCase_ )
lowerCAmelCase : Dict = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def lowerCamelCase__ ( self : List[Any] ):
pass
def lowerCamelCase__ ( self : Tuple ):
        # TODO: head-masking is not yet implemented
pass
def _snake_case ( _snake_case : List[str] ):
return tf.constant(_snake_case , dtype=tf.intaa )
snake_case__ : Union[str, Any] = 1e-4
@slow
@require_tf
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
lowerCAmelCase : str = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : Union[str, Any] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : Optional[Any] = prepare_led_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model(**UpperCamelCase_ )[0]
lowerCAmelCase : List[Any] = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , UpperCamelCase_ )
# change to expected output here
lowerCAmelCase : Optional[int] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1E-3 )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
lowerCAmelCase : Dict = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : str = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : Dict = prepare_led_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ )[0]
lowerCAmelCase : Any = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
# change to expected output here
lowerCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1E-3 , rtol=1E-3 )
| 60 |
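In the LED tester above, `encoder_seq_length` rounds `seq_length` up to the next multiple of the adjusted attention window. A small worked check using the tester defaults (seq_length=7, attention_window=4, adjusted to 4 + 2 = 6):

seq_length, attention_window = 7, 4 + 2  # tester default window, plus one token before and one after
encoder_seq_length = seq_length + (attention_window - seq_length % attention_window) % attention_window
assert encoder_seq_length == 12 and encoder_seq_length % attention_window == 0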
"""simple docstring"""
def _snake_case ( _snake_case : list ):
def merge(_snake_case : list , _snake_case : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(_snake_case ) <= 1:
return collection
lowerCAmelCase : Union[str, Any] = len(_snake_case ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 60 | 1 |
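The merge-sort sample above reuses a single obfuscated name (`_snake_case`) for both parameters of `merge`, which is a syntax error in Python. A runnable sketch of the same generator-based merge, with illustrative names:

def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                # Pop from whichever list currently has the smaller head.
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))

assert merge_sort([5, 3, 8, 1, 2]) == [1, 2, 3, 5, 8]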
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__: List[Any] = logging.get_logger(__name__)
__magic_name__: Any = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case__ ( __snake_case , __snake_case ):
lowercase__ : List[Any] = "swin"
lowercase__ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowerCAmelCase__=2_24 , lowerCAmelCase__=4 , lowerCAmelCase__=3 , lowerCAmelCase__=96 , lowerCAmelCase__=[2, 2, 6, 2] , lowerCAmelCase__=[3, 6, 12, 24] , lowerCAmelCase__=7 , lowerCAmelCase__=4.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=32 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> int:
super().__init__(**lowerCamelCase_ )
__magic_name__ : List[Any] = image_size
__magic_name__ : List[Any] = patch_size
__magic_name__ : Optional[int] = num_channels
__magic_name__ : Dict = embed_dim
__magic_name__ : str = depths
__magic_name__ : Any = len(lowerCamelCase_ )
__magic_name__ : Optional[Any] = num_heads
__magic_name__ : List[Any] = window_size
__magic_name__ : List[str] = mlp_ratio
__magic_name__ : Tuple = qkv_bias
__magic_name__ : Optional[int] = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = drop_path_rate
__magic_name__ : Any = hidden_act
__magic_name__ : str = use_absolute_embeddings
__magic_name__ : Optional[int] = layer_norm_eps
__magic_name__ : Optional[int] = initializer_range
__magic_name__ : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__magic_name__ : List[Any] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
__magic_name__ : Optional[Any] = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
__magic_name__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class snake_case__ ( __snake_case ):
lowercase__ : int = version.parse('''1.11''' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ ( self ) -> float:
return 1e-4
| 365 |
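As the comment in the Swin config above notes, the channel dimension doubles at every stage after the first, which is exactly what the derived `hidden_size` encodes. A one-line check with the defaults (embed_dim=96, depths=[2, 2, 6, 2]):

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # channels after the last stage
assert hidden_size == 768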
def UpperCamelCase ( _A ):
"""simple docstring"""
if not isinstance(_A, _A ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
__magic_name__ : str = 0
while number:
        # This way we jump straight to the next set bit (the next 1) instead of
        # looping through every bit position and checking for 1s, so the loop
        # runs once per set bit rather than a fixed 32 times.
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 | 0 |
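The loop above is Brian Kernighan's trick: `number &= number - 1` clears exactly the lowest set bit, so the body runs once per set bit. A runnable, de-obfuscated sketch (the name `count_set_bits` is illustrative):

def count_set_bits(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        number &= number - 1  # clear the lowest set bit
        count += 1
    return count

assert count_set_bits(0b101101) == 4
assert count_set_bits(0) == 0

For reference, `bin(number).count("1")` (or `int.bit_count()` on Python 3.10+) gives the same result.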
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Dict , **A : int ):
super().__init__(**A )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(A )
def _A ( self : Optional[int] , **A : Optional[Any] ):
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Union[str, Any] = {}
# preprocess args
if "points_per_batch" in kwargs:
_UpperCAmelCase : Tuple = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
_UpperCAmelCase : List[Any] = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
_UpperCAmelCase : List[str] = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
_UpperCAmelCase : List[str] = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
_UpperCAmelCase : List[str] = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
_UpperCAmelCase : str = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
_UpperCAmelCase : str = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
_UpperCAmelCase : str = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
_UpperCAmelCase : List[Any] = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
_UpperCAmelCase : Any = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
_UpperCAmelCase : str = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[Any] , A : List[str] , *A : List[Any] , A : str=None , A : str=None , **A : str ):
return super().__call__(A , *A , num_workers=A , batch_size=A , **A )
def _A ( self : Optional[Any] , A : Optional[Any] , A : List[Any]=64 , A : int = 0 , A : float = 512 / 1500 , A : Optional[int] = 32 , A : Optional[int] = 1 , ):
_UpperCAmelCase : List[str] = load_image(A )
_UpperCAmelCase : int = self.image_processor.size["longest_edge"]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.image_processor.generate_crop_boxes(
A , A , A , A , A , A )
_UpperCAmelCase : List[Any] = self.image_processor(images=A , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
_UpperCAmelCase : List[str] = self.get_inference_context()
with inference_context():
_UpperCAmelCase : Any = self._ensure_tensor_on_device(A , device=self.device )
_UpperCAmelCase : Dict = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
_UpperCAmelCase : List[str] = image_embeddings
_UpperCAmelCase : Union[str, Any] = grid_points.shape[1]
_UpperCAmelCase : Union[str, Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , A , A ):
_UpperCAmelCase : int = grid_points[:, i : i + points_per_batch, :, :]
_UpperCAmelCase : Dict = input_labels[:, i : i + points_per_batch]
_UpperCAmelCase : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _A ( self : List[str] , A : List[str] , A : Tuple=0.88 , A : Union[str, Any]=0.95 , A : Union[str, Any]=0 , A : Optional[Any]=1 , ):
_UpperCAmelCase : Any = model_inputs.pop("input_boxes" )
_UpperCAmelCase : Optional[Any] = model_inputs.pop("is_last" )
_UpperCAmelCase : str = model_inputs.pop("original_sizes" ).tolist()
_UpperCAmelCase : Optional[int] = model_inputs.pop("reshaped_input_sizes" ).tolist()
_UpperCAmelCase : List[Any] = self.model(**A )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_UpperCAmelCase : Union[str, Any] = model_outputs["pred_masks"]
_UpperCAmelCase : List[str] = self.image_processor.post_process_masks(
A , A , A , A , binarize=A )
_UpperCAmelCase : Any = model_outputs["iou_scores"]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , A , A , A , A , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _A ( self : List[str] , A : List[str] , A : Optional[int]=False , A : List[str]=False , A : Tuple=0.7 , ):
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : int = []
_UpperCAmelCase : Tuple = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
_UpperCAmelCase : int = torch.cat(A )
_UpperCAmelCase : Tuple = torch.cat(A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.image_processor.post_process_for_mask_generation(
A , A , A , A )
_UpperCAmelCase : Any = defaultdict(A )
for output in model_outputs:
for k, v in output.items():
extra[k].append(A )
_UpperCAmelCase : List[str] = {}
if output_rle_mask:
_UpperCAmelCase : Optional[Any] = rle_mask
if output_bboxes_mask:
_UpperCAmelCase : str = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 31 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , A : Dict , A : Dict=7 , A : Optional[int]=3 , A : Optional[int]=18 , A : Dict=30 , A : List[Any]=400 , A : Union[str, Any]=True , A : Tuple=None , A : List[Any]=True , A : int=None , A : Optional[int]=True , ):
_UpperCAmelCase : Optional[int] = size if size is not None else {"shortest_edge": 20}
_UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : Optional[Any] = num_channels
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : int = min_resolution
_UpperCAmelCase : Optional[int] = max_resolution
_UpperCAmelCase : List[str] = do_resize
_UpperCAmelCase : Optional[Any] = size
_UpperCAmelCase : Tuple = do_center_crop
_UpperCAmelCase : Optional[int] = crop_size
_UpperCAmelCase : Optional[Any] = do_flip_channel_order
def _A ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Tuple = MobileViTImageProcessor if is_vision_available() else None
def _A ( self : List[Any] ):
_UpperCAmelCase : Any = MobileViTImageProcessingTester(self )
@property
def _A ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Tuple ):
_UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "size" ) )
self.assertTrue(hasattr(A , "do_center_crop" ) )
self.assertTrue(hasattr(A , "center_crop" ) )
self.assertTrue(hasattr(A , "do_flip_channel_order" ) )
def _A ( self : Any ):
_UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Any ):
pass
def _A ( self : Dict ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : Union[str, Any] ):
# Initialize image_processing
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : Any ):
# Initialize image_processing
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Any = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 31 | 1 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ = 1e-12, snake_case_ = 1_0_0, ) -> tuple[float, np.ndarray]:
"""simple docstring"""
assert np.shape(snake_case_ )[0] == np.shape(snake_case_ )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case_ )[0] == np.shape(snake_case_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case_ ) == np.iscomplexobj(snake_case_ )
a = np.iscomplexobj(snake_case_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case_, input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
a = False
a = 0
a = 0
a = 1e12
while not convergence:
# Multiple matrix by the vector.
a = np.dot(snake_case_, snake_case_ )
# Normalize the resulting output vector.
a = w / np.linalg.norm(snake_case_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
a = vector.conj().T if is_complex else vector.T
a = np.dot(snake_case_, np.dot(snake_case_, snake_case_ ) )
# Check convergence.
a = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
a = True
a = lambda_
if is_complex:
a = np.real(lambda_ )
return lambda_, vector
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
a = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
a = np.array([4_1, 4, 2_0] )
a = real_input_matrix.astype(np.complexaaa )
a = np.triu(1J * complex_input_matrix, 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
a = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
a = real_input_matrix
a = real_vector
elif problem_type == "complex":
a = complex_input_matrix
a = complex_vector
# Our implementation.
a , a = power_iteration(snake_case_, snake_case_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
a , a = np.linalg.eigh(snake_case_ )
# Last eigenvalue is the maximum one.
a = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
a = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case_ ) - np.abs(snake_case_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 366 |
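The sample above also suffers from duplicate obfuscated parameter names, so it cannot run. A condensed runnable sketch of the same loop, normalizing each iterate and testing convergence on the relative change of the Rayleigh quotient (names are illustrative):

import numpy as np

def power_iteration(matrix, vector, error_tol=1e-12, max_iterations=100):
    value_prev = np.inf
    for _ in range(max_iterations):
        w = matrix @ vector
        vector = w / np.linalg.norm(w)             # re-normalize each step
        value = vector.conj().T @ matrix @ vector  # Rayleigh quotient
        if abs(value - value_prev) / abs(value) <= error_tol:
            break
        value_prev = value
    return np.real(value), vector

A = np.array([[41.0, 4, 20], [4, 26, 30], [20, 30, 50]])
value, vector = power_iteration(A, np.array([41.0, 4, 20]))
assert abs(value - np.linalg.eigh(A)[0][-1]) <= 1e-6  # matches the largest eigenvalue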
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCamelCase__ : Any = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
UpperCamelCase__ : Optional[Any] = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
UpperCamelCase__ : Optional[Any] = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
UpperCamelCase__ : List[str] = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
UpperCamelCase__ : Optional[int] = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]:
"""simple docstring"""
for tf_name, hf_name in patterns:
a = k.replace(snake_case_, snake_case_ )
return k
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
a = BigBirdPegasusConfig(**snake_case_ )
a = BigBirdPegasusForConditionalGeneration(snake_case_ )
a = torch_model.state_dict()
a = {}
# separating decoder weights
a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ):
a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
a = DECODER_PATTERNS
a = rename_state_dict_key(snake_case_, snake_case_ )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
a = v.T
a = torch.from_numpy(snake_case_ )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ):
a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
a = REMAINING_PATTERNS
a = rename_state_dict_key(snake_case_, snake_case_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
a = v.T
a = torch.from_numpy(snake_case_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
a = mapping['''model.embed_positions.weight''']
a = mapping.pop('''model.embed_positions.weight''' )
a , a = torch_model.load_state_dict(snake_case_, strict=snake_case_ )
a = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict:
"""simple docstring"""
a = tf.train.list_variables(snake_case_ )
a = {}
a = ['''global_step''']
for name, shape in tqdm(snake_case_, desc='''converting tf checkpoint to dict''' ):
a = any(pat in name for pat in ignore_name )
if skip_key:
continue
a = tf.train.load_variable(snake_case_, snake_case_ )
a = array
return tf_weights
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int:
"""simple docstring"""
a = get_tf_weights_as_numpy(snake_case_ )
a = convert_bigbird_pegasus(snake_case_, snake_case_ )
torch_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
UpperCamelCase__ : int = parser.parse_args()
UpperCamelCase__ : Tuple = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 330 | 0 |
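The `rename_state_dict_key` sample above loses its accumulator to the obfuscation (each replacement result is assigned to a throwaway name, so the input would be returned unchanged). A runnable sketch of the intended ordered substring replacement:

def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)  # apply each (tf, hf) pattern in order
    return k

assert rename_state_dict_key(
    "pegasus/decoder/layer_0/self/query/kernel",
    [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")],
) == "model.decoder.layers.0.self.query.weight"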
'''simple docstring'''
from maths.prime_factors import prime_factors
def a_ ( lowerCamelCase : int ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(lowerCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
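Assuming `maths.prime_factors` returns prime factors with multiplicity, the sample above computes the Liouville function λ(n) = (-1)^Ω(n). A self-contained runnable sketch that trial-divides instead of importing the helper (the name `liouville_lambda` is illustrative):

def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError("Input must be a positive integer")
    omega, n, p = 0, number, 2  # Omega(n): prime factors counted with multiplicity
    while p * p <= n:
        while n % p == 0:
            n //= p
            omega += 1
        p += 1
    if n > 1:  # leftover factor is prime
        omega += 1
    return -1 if omega % 2 else 1

assert [liouville_lambda(i) for i in range(1, 9)] == [1, -1, -1, 1, -1, 1, -1, -1]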
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
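A runnable sketch of the prefix-sum structure above: inclusive range sums in O(1) after O(n) preprocessing, plus the set-based check for whether any contiguous subarray sums to a target (true exactly when two prefix sums differ by that target):

class PrefixSum:
    def __init__(self, array: list) -> None:
        self.prefix_sum = [0] * len(array)
        if array:
            self.prefix_sum[0] = array[0]
            for i in range(1, len(array)):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:  # inclusive indices
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:  # an earlier prefix closes a window
                return True
            sums.add(sum_item)
        return False

ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(1, 3) == 9
assert ps.contains_sum(5) and not ps.contains_sum(100)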
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCamelCase_ : Dict = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
UpperCamelCase_ : List[Any] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
UpperCamelCase_ : List[str] = '''
Calculates how good predictions are given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
UpperCamelCase_ : List[str] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
UpperCamelCase_ : Any = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _lowercase ( self ) -> Dict:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) ,homepage="https://github.com/openai/human-eval" ,codebase_urls=["https://github.com/openai/human-eval"] ,reference_urls=["https://github.com/openai/human-eval"] ,license=_LICENSE ,)
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=[1, 10, 100] ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=3.0 ) -> Any:
if os.getenv("HF_ALLOW_CODE_EVAL" ,0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=_SCREAMING_SNAKE_CASE ) as executor:
_snake_case = []
_snake_case = Counter()
_snake_case = 0
_snake_case = defaultdict(_SCREAMING_SNAKE_CASE )
for task_id, (candidates, test_case) in enumerate(zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ):
for candidate in candidates:
_snake_case = candidate + "\n" + test_case
_snake_case = (test_program, timeout, task_id, completion_id[task_id])
_snake_case = executor.submit(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE )
futures.append(_SCREAMING_SNAKE_CASE )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_SCREAMING_SNAKE_CASE ):
_snake_case = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
_snake_case , _snake_case = [], []
for result in results.values():
result.sort()
_snake_case = [r[1]["passed"] for r in result]
total.append(len(_SCREAMING_SNAKE_CASE ) )
correct.append(sum(_SCREAMING_SNAKE_CASE ) )
_snake_case = np.array(_SCREAMING_SNAKE_CASE )
_snake_case = np.array(_SCREAMING_SNAKE_CASE )
_snake_case = k
_snake_case = {f"""pass@{k}""": estimate_pass_at_k(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __a ( _UpperCamelCase: Optional[Any] , _UpperCamelCase: int , _UpperCamelCase: str ) -> int:
"""simple docstring"""
def estimator(_UpperCamelCase: int , _UpperCamelCase: int , _UpperCamelCase: int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(_UpperCamelCase , int ):
_snake_case = itertools.repeat(_UpperCamelCase , len(_UpperCamelCase ) )
else:
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
_snake_case = iter(_UpperCamelCase )
return np.array([estimator(int(_UpperCamelCase ) , int(_UpperCamelCase ) , _UpperCamelCase ) for n, c in zip(_UpperCamelCase , _UpperCamelCase )] )
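# Hedged usage sketch (inputs are made up): with 5 samples per task and 1, 3 and 5
# passing candidates, the unbiased estimator above reduces to c / n at k = 1.
# >>> estimate_pass_at_k([5, 5, 5], [1, 3, 5], 1)
# array([0.2, 0.6, 1. ])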
| 142 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCamelCase_ : int = '''pytorch_model.bin'''
UpperCamelCase_ : str = '''pytorch_model.bin.index.json'''
UpperCamelCase_ : int = '''adapter_config.json'''
UpperCamelCase_ : str = '''adapter_model.bin'''
UpperCamelCase_ : str = '''adapter_model.safetensors'''
UpperCamelCase_ : List[Any] = '''tf_model.h5'''
UpperCamelCase_ : Union[str, Any] = '''tf_model.h5.index.json'''
UpperCamelCase_ : Tuple = '''model.ckpt'''
UpperCamelCase_ : Union[str, Any] = '''flax_model.msgpack'''
UpperCamelCase_ : Union[str, Any] = '''flax_model.msgpack.index.json'''
UpperCamelCase_ : Dict = '''model.safetensors'''
UpperCamelCase_ : List[Any] = '''model.safetensors.index.json'''
UpperCamelCase_ : Tuple = '''config.json'''
UpperCamelCase_ : List[str] = '''preprocessor_config.json'''
UpperCamelCase_ : List[Any] = FEATURE_EXTRACTOR_NAME
UpperCamelCase_ : Union[str, Any] = '''generation_config.json'''
UpperCamelCase_ : str = '''modelcard.json'''
UpperCamelCase_ : List[Any] = '''▁'''
UpperCamelCase_ : Tuple = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCamelCase_ : Any = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCamelCase_ : Tuple = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCamelCase_ : str = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __a ( _UpperCamelCase: Optional[Any] ) -> int:
"""simple docstring"""
    if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
_snake_case = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
_snake_case = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 142 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Dict = '▁'
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
SCREAMING_SNAKE_CASE :Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
SCREAMING_SNAKE_CASE :Optional[Any] = {
'facebook/s2t-small-librispeech-asr': 1024,
}
SCREAMING_SNAKE_CASE :Optional[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
SCREAMING_SNAKE_CASE :Union[str, Any] = {'mustc': MUSTC_LANGS}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = MAX_MODEL_INPUT_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = []
def __init__( self : Optional[Any] ,A : List[str] ,A : str ,A : Tuple="<s>" ,A : Dict="</s>" ,A : List[str]="<pad>" ,A : Tuple="<unk>" ,A : Dict=False ,A : Tuple=False ,A : str=None ,A : List[str]=None ,A : Optional[Dict[str, Any]] = None ,**A : str ,):
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,do_upper_case=A ,do_lower_case=A ,tgt_lang=A ,lang_codes=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = do_upper_case
__A = do_lower_case
__A = load_json(A )
__A = {v: k for k, v in self.encoder.items()}
__A = spm_file
__A = load_spm(A ,self.sp_model_kwargs )
if lang_codes is not None:
__A = lang_codes
__A = LANGUAGES[lang_codes]
__A = [f'''<lang:{lang}>''' for lang in self.langs]
__A = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
__A = self.lang_tokens
__A = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__A = {}
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return len(self.encoder )
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, Any] ):
__A = new_tgt_lang
self.set_tgt_lang_special_tokens(A )
def UpperCamelCase_ ( self : Optional[int] ,A : str ):
__A = self.lang_code_to_id[tgt_lang]
__A = [lang_code_id]
def UpperCamelCase_ ( self : str ,A : str ):
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[int] ):
return self.encoder.get(A ,self.encoder[self.unk_token] )
def UpperCamelCase_ ( self : str ,A : int ):
return self.decoder.get(A ,self.unk_token )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[str] ):
__A = []
__A = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__A = self.sp_model.decode(A )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__A = []
else:
current_sub_tokens.append(A )
__A = self.sp_model.decode(A )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
__A = [1] * len(self.prefix_tokens )
__A = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Union[str, Any] ,A : Dict ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = load_spm(self.spm_file ,self.sp_model_kwargs )
def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ):
__A = Path(A )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
__A = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
__A = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder ,A )
if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,A )
elif not os.path.isfile(self.spm_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (str(A ), str(A ))
def UpperCAmelCase ( a_ , a_ ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
__A = sentencepiece.SentencePieceProcessor(**a_ )
spm.Load(str(a_ ) )
return spm
def UpperCAmelCase ( a_ ) -> Union[Dict, List]:
"""simple docstring"""
with open(a_ , "r" ) as f:
return json.load(a_ )
def UpperCAmelCase ( a_ , a_ ) -> None:
"""simple docstring"""
with open(a_ , "w" ) as f:
json.dump(a_ , a_ , indent=2 )
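# Round-trip sketch for the JSON helpers above (the path is hypothetical):
# >>> save_json({"<s>": 0, "<pad>": 1, "</s>": 2}, "vocab.json")
# >>> load_json("vocab.json")
# {'<s>': 0, '<pad>': 1, '</s>': 2}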
| 15 |
def snake_case_ ( snake_case , snake_case ) -> str:
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
lowercase__: str = str(bin(snake_case ) )
binary_number += "0" * shift_amount
return binary_number
def snake_case_ ( snake_case , snake_case ) -> str:
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
lowercase__: Optional[Any] = str(bin(snake_case ) )[2:]
if shift_amount >= len(snake_case ):
return "0b0"
lowercase__: Optional[int] = binary_number[: len(snake_case ) - shift_amount]
return "0b" + shifted_binary_number
def snake_case_ ( snake_case , snake_case ) -> str:
if number >= 0: # Get binary representation of positive number
lowercase__: Union[str, Any] = '0' + str(bin(snake_case ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase__: Dict = len(bin(snake_case )[3:] ) # Find 2's complement of number
lowercase__: int = bin(abs(snake_case ) - (1 << binary_number_length) )[3:]
lowercase__: Any = (
'1' + '0' * (binary_number_length - len(snake_case )) + binary_number
)
if shift_amount >= len(snake_case ):
return "0b" + binary_number[0] * len(snake_case )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(snake_case ) - shift_amount]
)
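# Worked examples (as comments, since the original doctests were stripped):
# logical left shift:   5 (0b101)  shifted by 2 -> "0b10100"
# logical right shift: 11 (0b1011) shifted by 1 -> "0b101"
# arithmetic right shift keeps the sign bit: -4 (0b1100) shifted by 1 -> "0b1110"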
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Tuple = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =checkpoint
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_in.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_in.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.norm_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.norm_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_in.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_in.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.norm_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.norm_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['quant_conv.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['quant_conv.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['post_quant_conv.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
_SCREAMING_SNAKE_CASE =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
_SCREAMING_SNAKE_CASE ={
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(_UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_SCREAMING_SNAKE_CASE =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
_SCREAMING_SNAKE_CASE ={
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(_UpperCamelCase )
}
for i in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =[key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
_SCREAMING_SNAKE_CASE =vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.weight" )
_SCREAMING_SNAKE_CASE =vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.bias" )
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"down.{i}.block", 'new': f"down_blocks.{i}.resnets"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'encoder.mid.block' in key]
_SCREAMING_SNAKE_CASE =2
for i in range(1 , num_mid_res_blocks + 1 ):
_SCREAMING_SNAKE_CASE =[key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'encoder.mid.attn' in key]
_SCREAMING_SNAKE_CASE =renew_vae_attention_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
conv_attn_to_linear(_UpperCamelCase )
for i in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =num_up_blocks - 1 - i
_SCREAMING_SNAKE_CASE =[
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
]
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
_SCREAMING_SNAKE_CASE =vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.weight"
]
_SCREAMING_SNAKE_CASE =vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.bias"
]
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"up.{block_id}.block", 'new': f"up_blocks.{i}.resnets"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'decoder.mid.block' in key]
_SCREAMING_SNAKE_CASE =2
for i in range(1 , num_mid_res_blocks + 1 ):
_SCREAMING_SNAKE_CASE =[key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'decoder.mid.attn' in key]
_SCREAMING_SNAKE_CASE =renew_vae_attention_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
conv_attn_to_linear(_UpperCamelCase )
return new_checkpoint
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str , ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
_SCREAMING_SNAKE_CASE =io.BytesIO(r.content )
_SCREAMING_SNAKE_CASE =OmegaConf.load(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =5_12
_SCREAMING_SNAKE_CASE ='cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
_SCREAMING_SNAKE_CASE ={}
with safe_open(_UpperCamelCase , framework='pt' , device='cpu' ) as f:
for key in f.keys():
_SCREAMING_SNAKE_CASE =f.get_tensor(_UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE =torch.load(_UpperCamelCase , map_location=_UpperCamelCase )['state_dict']
# Convert the VAE model.
_SCREAMING_SNAKE_CASE =create_vae_diffusers_config(_UpperCamelCase , image_size=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =custom_convert_ldm_vae_checkpoint(_UpperCamelCase , _UpperCamelCase )
_SCREAMING_SNAKE_CASE =AutoencoderKL(**_UpperCamelCase )
vae.load_state_dict(_UpperCamelCase )
vae.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
lowerCamelCase : List[str] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
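# Hypothetical invocation (script name and both paths are assumptions):
# python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers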
| 114 | 0 |
from manim import *
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[Any] = Rectangle(height=0.5 , width=0.5)
a__ : str = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ : Any = Rectangle(height=0.25 , width=0.25)
a__ : List[Any] = [mem.copy() for i in range(6)]
a__ : Optional[Any] = [mem.copy() for i in range(6)]
a__ : Tuple = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : Dict = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : List[Any] = VGroup(lowercase , lowercase).arrange(lowercase , buff=0)
a__ : List[str] = Text('CPU' , font_size=24)
a__ : Optional[Any] = Group(lowercase , lowercase).arrange(lowercase , buff=0.5 , aligned_edge=lowercase)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase)
a__ : Dict = [mem.copy() for i in range(4)]
a__ : Optional[Any] = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : str = Text('GPU' , font_size=24)
a__ : Any = Group(lowercase , lowercase).arrange(lowercase , buff=0.5 , aligned_edge=lowercase)
gpu.move_to([-1, -1, 0])
self.add(lowercase)
a__ : Any = [mem.copy() for i in range(6)]
a__ : List[str] = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : int = Text('Model' , font_size=24)
a__ : List[Any] = Group(lowercase , lowercase).arrange(lowercase , buff=0.5 , aligned_edge=lowercase)
model.move_to([3, -1.0, 0])
self.add(lowercase)
a__ : Dict = []
a__ : str = []
for i, rect in enumerate(lowercase):
a__ : Tuple = fill.copy().set_fill(lowercase , opacity=0.8)
target.move_to(lowercase)
model_arr.append(lowercase)
a__ : int = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowercase , opacity=0.8)
cpu_target.move_to(cpu_left_col_base[i])
model_cpu_arr.append(lowercase)
self.add(*lowercase , *lowercase)
a__ : Dict = [meta_mem.copy() for i in range(6)]
a__ : List[Any] = [meta_mem.copy() for i in range(6)]
a__ : Union[str, Any] = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : Dict = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : List[Any] = VGroup(lowercase , lowercase).arrange(lowercase , buff=0)
a__ : Dict = Text('Disk' , font_size=24)
a__ : Optional[int] = Group(lowercase , lowercase).arrange(lowercase , buff=0.5 , aligned_edge=lowercase)
disk.move_to([-4, -1.25, 0])
self.add(lowercase , lowercase)
a__ : List[str] = Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ : List[Any] = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase , lowercase)
a__ : Dict = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left())
self.add(lowercase)
a__ : Dict = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase))
a__ : int = Square(0.3)
input.set_fill(lowercase , opacity=1.0)
input.set_stroke(width=0.0)
input.next_to(model_base[0] , lowercase , buff=0.5)
self.play(Write(lowercase))
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowercase , buff=0.02)
self.play(MoveToTarget(lowercase))
self.play(FadeOut(lowercase))
a__ : str = Arrow(start=lowercase , end=lowercase , color=lowercase , buff=0.5)
a.next_to(model_arr[0].get_left() , lowercase , buff=0.2)
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0])
a__ : Union[str, Any] = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase , run_time=3))
a__ : Union[str, Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(lowercase) , Circumscribe(model_arr[0] , color=lowercase , **lowercase) , Circumscribe(model_cpu_arr[0] , color=lowercase , **lowercase) , Circumscribe(gpu_rect[0] , color=lowercase , **lowercase) , )
self.play(MoveToTarget(model_cpu_arr[0]))
a__ : List[Any] = a.copy()
for i in range(6):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowercase , buff=0.2)
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02)
a__ : Any = AnimationGroup(
FadeOut(lowercase , run_time=0.5) , MoveToTarget(lowercase , run_time=0.5) , FadeIn(lowercase , run_time=0.5) , lag_ratio=0.2)
self.play(lowercase)
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i])
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
if i >= 1:
a__ : Tuple = 0.7
self.play(
Circumscribe(model_arr[i] , **lowercase) , Circumscribe(cpu_left_col_base[i] , **lowercase) , Circumscribe(cpu_left_col_base[i + 1] , color=lowercase , **lowercase) , Circumscribe(gpu_rect[0] , color=lowercase , **lowercase) , Circumscribe(model_arr[i + 1] , color=lowercase , **lowercase) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i]) , MoveToTarget(model_cpu_arr[i + 1]) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2)
self.play(
Circumscribe(model_arr[-1] , color=lowercase , **lowercase) , Circumscribe(cpu_left_col_base[-1] , color=lowercase , **lowercase) , Circumscribe(gpu_rect[0] , color=lowercase , **lowercase) , )
self.play(MoveToTarget(model_cpu_arr[i]))
a__ : Optional[int] = a_c
a__ : str = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5)
self.play(
FadeOut(lowercase) , FadeOut(lowercase , run_time=0.5) , )
a__ : List[Any] = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24)
step_a.move_to([2, 2, 0])
self.play(Write(lowercase , run_time=3) , MoveToTarget(lowercase))
self.wait()
| 99 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = parent
a__ : List[str] = batch_size
a__ : List[str] = image_size
a__ : Dict = patch_size
a__ : Optional[Any] = num_channels
a__ : List[Any] = is_training
a__ : str = use_labels
a__ : Dict = hidden_size
a__ : Tuple = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : Union[str, Any] = intermediate_size
a__ : List[str] = hidden_act
a__ : List[str] = hidden_dropout_prob
a__ : Any = attention_probs_dropout_prob
a__ : Dict = type_sequence_label_size
a__ : Tuple = initializer_range
a__ : Optional[int] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : List[str] = (image_size // patch_size) ** 2
a__ : Any = num_patches + 1
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ : Tuple = None
if self.use_labels:
a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : int = ViTMSNModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = self.type_sequence_label_size
a__ : List[str] = ViTMSNForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase , labels=lowercase)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a__ : List[str] = 1
a__ : Optional[int] = ViTMSNForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a__ : Dict = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
a__ , a__ , a__ : Optional[Any] = config_and_inputs
a__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Any = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__A : Tuple = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__A : List[str] = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : Any = False
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = ViTMSNModelTester(self)
a__ : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds')
def __lowercase ( self) -> Any:
'''simple docstring'''
pass
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Union[str, Any] = model_class(lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowercase)
a__ : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : str = [*signature.parameters.keys()]
a__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = ViTMSNModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def A_ ( ) -> Dict:
a__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(2)
a__ : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(lowercase)
a__ : Any = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Optional[Any] = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__ : Tuple = model(**lowercase)
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase)
a__ : Any = torch.tensor([-0.08_03, -0.44_54, -0.23_75]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
| 99 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , ) -> Optional[Any]:
snake_case__ : Optional[int] = {}
if train_file is not None:
snake_case__ : List[Any] = [train_file]
if eval_file is not None:
snake_case__ : Tuple = [eval_file]
if test_file is not None:
snake_case__ : Union[str, Any] = [test_file]
snake_case__ : str = datasets.load_dataset("""csv""" , data_files=_lowerCAmelCase )
snake_case__ : Optional[Any] = list(ds[list(files.keys() )[0]].features.keys() )
snake_case__ : List[Any] = features_name.pop(_lowerCAmelCase )
snake_case__ : Union[str, Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case__ : int = {label: i for i, label in enumerate(_lowerCAmelCase )}
snake_case__ : List[Any] = tokenizer.model_input_names
snake_case__ : int = {}
if len(_lowerCAmelCase ) == 1:
for k in files.keys():
snake_case__ : int = ds[k].map(
lambda _lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" ) , batched=_lowerCAmelCase , )
elif len(_lowerCAmelCase ) == 2:
for k in files.keys():
snake_case__ : List[str] = ds[k].map(
lambda _lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , ) , batched=_lowerCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case__ : int = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case__ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
snake_case__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
snake_case__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case__ : List[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case__ : Optional[int] = (
tf.data.Dataset.from_generator(
_lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case__ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case__ : str = (
tf.data.Dataset.from_generator(
_lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case__ : Any = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__a = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(metadata={"help": "Which column contains the label"} )
lowercase = field(default=_a , metadata={"help": "The path of the training file"} )
lowercase = field(default=_a , metadata={"help": "The path of the development file"} )
lowercase = field(default=_a , metadata={"help": "The path of the test file"} )
lowercase = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase = field(
default=_a , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowercase = field(
default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase = field(
default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowercase = field(default=_a , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase = field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def __snake_case( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case__ , snake_case__ , snake_case__ : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case__ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case__ : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCAmelCase ) -> Dict:
snake_case__ : List[str] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case__ : Optional[int] = TFTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case__ : int = trainer.evaluate()
snake_case__ : Optional[Any] = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(_lowerCAmelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(_lowerCAmelCase )
return results
if __name__ == "__main__":
main()
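# Hypothetical invocation (script name, file names and column id are assumptions):
# python run_tf_text_classification.py \
#     --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#     --model_name_or_path bert-base-uncased --output_dir ./model --do_train --do_eval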
| 43 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> int:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("""Input value must be an 'int' type""" )
snake_case__ : List[str] = 0
while number:
position += 1
number >>= 1
return position
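# Worked examples: the helper returns the position of the highest set bit,
# i.e. int.bit_length() for non-negative inputs.
# >>> __snake_case(1)   # 0b1
# 1
# >>> __snake_case(8)   # 0b1000
# 4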
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 68 |
from math import sqrt
def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int:
lowerCamelCase : int = 0
lowerCamelCase : int = 0
lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
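# Sanity check against the published Project Euler 86 data: M = 99 already admits
# 1975 integer-shortest-path cuboids, and the default limit of one million is first
# exceeded at M = 1818.
# >>> solution(1974)
# 99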
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48 | 0 |
"""simple docstring"""
def lowerCamelCase (a_ :list[int] , a_ :list[int] , a_ :int) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(a_))
def lowerCamelCase (a_ :list[list[int]] , a_ :int , a_ :list[int] , a_ :int) -> bool:
# Base Case
if index == len(a_):
return True
# Recursive Step
for i in range(a_):
if valid_coloring(graph[index] , a_ , a_):
# Color current vertex
lowercase :List[Any] = i
# Validate coloring
if util_color(a_ , a_ , a_ , index + 1):
return True
# Backtrack
lowercase :Tuple = -1
return False
def lowerCamelCase (a_ :list[list[int]] , a_ :int) -> list[int]:
lowercase :Tuple = [-1] * len(a_)
if util_color(a_ , a_ , a_ , 0):
return colored_vertices
return []
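# Usage sketch on a triangle graph given as an adjacency matrix, which needs three
# colors; the entry point name `color` is an assumption, since the defs above are masked.
# >>> color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 3)
# [0, 1, 2]
# >>> color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 2)
# []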
| 361 |
"""simple docstring"""
def lowerCamelCase (a_ :int , a_ :int) -> int:
while a != 0:
lowercase , lowercase :Dict = b % a, a
return b
def lowerCamelCase (a_ :int , a_ :int) -> int:
if gcd(a_ , a_) != 1:
lowercase :List[Any] = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(a_)
lowercase , lowercase , lowercase :List[str] = 1, 0, a
lowercase , lowercase , lowercase :int = 0, 1, m
while va != 0:
lowercase :Union[str, Any] = ua // va
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase :Dict = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
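# Worked example: the modular inverse of 3 mod 7 is 5, since 3 * 5 = 15 ≡ 1 (mod 7);
# coprimality is required, so e.g. (4, 8) raises ValueError. The name `mod_inverse`
# is an assumption, since the defs above are masked.
# >>> mod_inverse(3, 7)
# 5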
| 172 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = ["pixel_values"]
def __init__( self , __A = True , __A = None , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
lowerCAmelCase_ :Any = size if size is not None else {"""shortest_edge""": 384}
lowerCAmelCase_ :List[Any] = get_size_dict(__A , default_to_square=__A )
lowerCAmelCase_ :List[str] = do_resize
lowerCAmelCase_ :int = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ :Dict = crop_pct if crop_pct is not None else 224 / 256
lowerCAmelCase_ :Tuple = resample
lowerCAmelCase_ :List[Any] = do_rescale
lowerCAmelCase_ :Optional[Any] = rescale_factor
lowerCAmelCase_ :int = do_normalize
lowerCAmelCase_ :Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ :Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , __A , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
lowerCAmelCase_ :List[Any] = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ :Dict = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ :List[Any] = int(shortest_edge / crop_pct )
lowerCAmelCase_ :int = get_resize_output_image_size(__A , size=__A , default_to_square=__A )
lowerCAmelCase_ :Tuple = resize(image=__A , size=__A , resample=__A , data_format=__A , **__A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__A , size=(shortest_edge, shortest_edge) , data_format=__A , **__A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__A , size=(shortest_edge, shortest_edge) , resample=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> Optional[Any]:
return rescale(__A , scale=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
lowerCAmelCase_ :Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ :Dict = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ :Optional[int] = resample if resample is not None else self.resample
lowerCAmelCase_ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ :int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ :int = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ :Tuple = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ :str = image_std if image_std is not None else self.image_std
lowerCAmelCase_ :str = size if size is not None else self.size
lowerCAmelCase_ :Any = get_size_dict(__A , default_to_square=__A )
lowerCAmelCase_ :int = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase_ :str = [to_numpy_array(__A ) for image in images]
if do_resize:
lowerCAmelCase_ :List[Any] = [self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images]
if do_rescale:
lowerCAmelCase_ :Any = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
lowerCAmelCase_ :str = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
lowerCAmelCase_ :Any = [to_channel_dimension_format(__A , __A ) for image in images]
lowerCAmelCase_ :Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
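# Hedged usage sketch (the direct instantiation is for illustration only): at the
# default size the processor warps images straight to 384x384; below 384 it resizes
# by crop_pct and center-crops instead.
# >>> from PIL import Image
# >>> processor = _SCREAMING_SNAKE_CASE()
# >>> processor(images=Image.new("RGB", (500, 400)), return_tensors="np")["pixel_values"].shape
# (1, 3, 384, 384)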
| 84 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
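The evaluation loop above boils down to one identity: perplexity is the exponential of the mean token-level cross-entropy. A minimal self-contained sketch of that relationship, with made-up loss values:

import math

def perplexity_from_losses(losses):
    # perplexity = exp(mean cross-entropy); guard against overflow like the script does
    mean_loss = sum(losses) / len(losses)
    try:
        return math.exp(mean_loss)
    except OverflowError:
        return float("inf")

print(perplexity_from_losses([2.1, 2.3, 2.5]))  # ~9.97: a mean loss of 2.3 nats is a perplexity of ~10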
| 53 | 0 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""") | 190 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""") | 190 | 1 |
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
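A hypothetical call using the classic LeetCode 983 example (travel days and pass prices chosen for illustration):

travel_days = [1, 4, 6, 7, 8, 20]
pass_costs = [2, 7, 15]  # 1-day, 7-day and 30-day passes
print(mincost_tickets(travel_days, pass_costs))  # 11: 1-day passes on days 1 and 20, a 7-day pass covering days 4-8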
| 299 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
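To see what interpolate returns, a hypothetical mini-check with a cubic in place of the problem's degree-10 polynomial: fitting only the first two points gives a line whose first incorrect term (FIT) is 15, matching the u(n) = n**3 example in the Project Euler 101 statement:

cubic = lambda n: n**3
fit = interpolate([cubic(1), cubic(2)])  # line through (1, 1) and (2, 8)
print(fit(3))                            # 15, while cubic(3) is 27 -- the first disagreement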
| 299 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]):
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
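A hypothetical use of the decorator form above, memoizing a recursive Fibonacci (the workload is made up; decorator and cache_info are the names defined in this file):

@LRUCache.decorator(100)
def fib(num):
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, with each of the 31 distinct arguments computed only once
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=31)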
| 362 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
def snake_case_ (self ):
_UpperCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """depth_multiplier""" ) )
class MobileNetVaModelTester:
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3 , lowerCAmelCase__=3_2 , lowerCAmelCase__=0.2_5 , lowerCAmelCase__=8 , lowerCAmelCase__=True , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=3_2 , lowerCAmelCase__="relu6" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=1_0 , lowerCAmelCase__=None , ):
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : int = image_size
_UpperCAmelCase : List[str] = depth_multiplier
_UpperCAmelCase : Any = min_depth
_UpperCAmelCase : Dict = tf_padding
_UpperCAmelCase : Dict = int(last_hidden_size * depth_multiplier )
_UpperCAmelCase : Dict = output_stride
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Optional[Any] = classifier_dropout_prob
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : Any = scope
def snake_case_ (self ):
_UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ (self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = self.num_labels
_UpperCAmelCase : Tuple = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ (self ):
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = config_and_inputs
_UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case : Tuple = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
snake_case : Optional[Any] = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
snake_case : str = False
snake_case : str = False
snake_case : Optional[Any] = False
snake_case : Optional[int] = False
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = MobileNetVaModelTester(self )
_UpperCAmelCase : str = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def snake_case_ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def snake_case_ (self ):
pass
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(lowerCAmelCase__ )
_UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case_ (self ):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.hidden_states
_UpperCAmelCase : Tuple = 2_6
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def snake_case_ (self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[str] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
@cached_property
def snake_case_ (self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = self.default_image_processor
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase__ )
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : str = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 170 | 0 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
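A hypothetical brute-force cross-check of the combinatorial count, taken straight from the definition of a reversible number (Project Euler 145 states there are 120 of them below one thousand):

def is_reversible(n):
    if n % 10 == 0:  # reverse(n) would have a leading zero
        return False
    return all(int(c) % 2 == 1 for c in str(n + int(str(n)[::-1])))

print(sum(is_reversible(n) for n in range(1, 1000)))  # 120, the same count solution(3) produces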
| 229 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Any = KandinskyVaaControlnetImgaImgPipeline
_SCREAMING_SNAKE_CASE : Dict = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_SCREAMING_SNAKE_CASE : List[Any] = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_SCREAMING_SNAKE_CASE : Dict = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : int ) -> Optional[Any]:
return 32
@property
def a ( self : Union[str, Any] ) -> Dict:
return 32
@property
def a ( self : str ) -> Union[str, Any]:
return self.time_input_dim
@property
def a ( self : Tuple ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def a ( self : Union[str, Any] ) -> List[Any]:
return 1_00
@property
def a ( self : Optional[int] ) -> Any:
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def a ( self : Tuple ) -> Optional[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self : Any ) -> Dict:
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__lowerCAmelCase = DDIMScheduler(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE__ )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCAmelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a ( self : List[Any] ) -> int:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def a ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = init_image.resize((5_12, 5_12) )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
__lowerCAmelCase = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__ ) ).float() / 2_5_5.0
__lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__lowerCAmelCase = """A robot, 4k photo"""
__lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , strength=0.8_5 , generator=SCREAMING_SNAKE_CASE__ , negative_prompt="""""" , ).to_tuple()
__lowerCAmelCase = pipeline(
image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , hint=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 229 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
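A hypothetical instantiation showing the two derived properties (the defaults above mirror the falcon-7b configuration):

config = FalconConfig()
print(config.head_dim)  # 64 == 4544 // 71
print(config.rotary)    # True, since alibi defaults to False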
| 202 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
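A hypothetical round trip through the heap: items are stored as [item, key(value)] pairs, and with the default key the comparisons as written let larger values bubble to the top (a max-heap):

heap = Heap()
heap.insert_item(5, 34)
heap.insert_item(6, 31)
heap.insert_item(7, 37)
print(heap.get_top())      # [7, 37]: item 7 carries the largest value
print(heap.extract_top())  # [7, 37], removed from the heap
print(heap.get_top())      # [5, 34]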
| 202 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
def __init__( self : List[Any], lowerCAmelCase : List[str], lowerCAmelCase : Any=3, lowerCAmelCase : Optional[int]=32, lowerCAmelCase : Any=3, lowerCAmelCase : str=10, lowerCAmelCase : Optional[int]=[10, 20, 30, 40], lowerCAmelCase : str=[1, 1, 2, 1], lowerCAmelCase : Optional[Any]=True, lowerCAmelCase : Optional[int]=True, lowerCAmelCase : Any="relu", lowerCAmelCase : Tuple=3, lowerCAmelCase : int=None, ) -> Optional[Any]:
lowercase : Optional[int] = parent
lowercase : int = batch_size
lowercase : List[str] = image_size
lowercase : int = num_channels
lowercase : Tuple = embeddings_size
lowercase : List[Any] = hidden_sizes
lowercase : List[str] = depths
lowercase : int = is_training
lowercase : Union[str, Any] = use_labels
lowercase : List[Any] = hidden_act
lowercase : Any = num_labels
lowercase : List[str] = scope
lowercase : int = len(lowerCAmelCase )
def lowercase ( self : List[Any] ) -> int:
lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : List[Any] = ids_tensor([self.batch_size], self.num_labels )
lowercase : int = self.get_config()
return config, pixel_values, labels
def lowercase ( self : List[str] ) -> Tuple:
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def lowercase ( self : List[str], lowerCAmelCase : str, lowerCAmelCase : int, lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
lowercase : str = RegNetModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : Dict = model(lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase ( self : List[str], lowerCAmelCase : Tuple, lowerCAmelCase : int, lowerCAmelCase : List[Any] ) -> Tuple:
lowercase : Union[str, Any] = self.num_labels
lowercase : Any = RegNetForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : List[str] = model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase ( self : List[Any] ) -> int:
lowercase : int = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : Optional[int] = config_and_inputs
lowercase : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_lowerCamelCase = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
_lowerCamelCase = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase : str = RegNetModelTester(self )
lowercase : str = ConfigTester(self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase )
def lowercase ( self : List[str] ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Tuple ) -> Tuple:
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowercase ( self : Tuple ) -> Dict:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowercase ( self : Dict ) -> Dict:
pass
def lowercase ( self : Union[str, Any] ) -> List[Any]:
lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] = model_class(lowerCAmelCase )
lowercase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Dict = [*signature.parameters.keys()]
lowercase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCAmelCase )
def lowercase ( self : Tuple ) -> Any:
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase ( self : str ) -> Optional[int]:
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict = model_class(config=lowerCAmelCase )
for name, module in model.named_modules():
if isinstance(lowerCAmelCase, (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
self.assertTrue(
torch.all(module.bias == 0 ), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
def lowercase ( self : Union[str, Any] ) -> str:
def check_hidden_states_output(lowerCAmelCase : Optional[int], lowerCAmelCase : Any, lowerCAmelCase : Optional[int] ):
lowercase : Dict = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase : int = model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
lowercase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : Dict = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase : Optional[Any] = layer_type
lowercase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Optional[int] = True
check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Dict ) -> Union[str, Any]:
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowercase ( self : Union[str, Any] ) -> Any:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = RegNetModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase ( self : List[str] ) -> Optional[int]:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : int ) -> int:
lowercase : str = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
lowercase : Optional[int] = self.default_image_processor
lowercase : Any = prepare_img()
lowercase : Optional[Any] = image_processor(images=lowerCAmelCase, return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase : Any = model(**lowerCAmelCase )
# verify the logits
lowercase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase )
lowercase : int = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) )
| 255 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for a given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
)
lowercase : List[Any] = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , data_collator=SeqaSeqDataCollator(
_UpperCAmelCase , _UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
lowercase : List[Any] = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
lowercase : Union[str, Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase : List[str] = train_result.metrics
lowercase : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase : Tuple = trainer.evaluate(metric_key_prefix='val' )
lowercase : Dict = data_args.n_val
lowercase : Tuple = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
lowercase : List[Any] = trainer.predict(test_dataset=_UpperCAmelCase , metric_key_prefix='test' )
lowercase : str = test_output.metrics
lowercase : Dict = data_args.n_test
if trainer.is_world_process_zero():
lowercase : Tuple = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.predict_with_generate:
lowercase : str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
lowercase : Tuple = lmap(str.strip , _UpperCAmelCase )
write_txt_file(_UpperCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_UpperCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
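# Hypothetical invocation sketch -- the data/output paths and the checkpoint name
# below are placeholders, but every flag maps to a field defined above (or to the
# standard Seq2Seq training arguments):
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum --output_dir ./out \
#       --task summarization --n_train 1000 --eval_beams 4 \
#       --do_train --do_eval --predict_with_generate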
| 255 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( lowerCamelCase__ ):
'''simple docstring'''
lowerCAmelCase__ = 'poolformer'
def __init__( self : Tuple , A : Dict=3 , A : Dict=16 , A : Dict=16 , A : str=3 , A : List[Any]=4.0 , A : int=[2, 2, 6, 2] , A : List[Any]=[64, 128, 320, 512] , A : str=[7, 3, 3, 3] , A : str=[4, 2, 2, 2] , A : int=[2, 1, 1, 1] , A : Tuple=4 , A : str=0.0 , A : Optional[Any]="gelu" , A : List[Any]=True , A : Union[str, Any]=1E-5 , A : Union[str, Any]=0.02 , **A : Dict , ):
__snake_case: Tuple = num_channels
__snake_case: int = patch_size
__snake_case: List[str] = stride
__snake_case: List[str] = padding
__snake_case: Optional[Any] = pool_size
__snake_case: Optional[Any] = hidden_sizes
__snake_case: List[str] = mlp_ratio
__snake_case: List[Any] = depths
__snake_case: Tuple = patch_sizes
__snake_case: Any = strides
__snake_case: List[Any] = num_encoder_blocks
__snake_case: str = drop_path_rate
__snake_case: str = hidden_act
__snake_case: Union[str, Any] = use_layer_scale
__snake_case: List[Any] = layer_scale_init_value
__snake_case: Optional[int] = initializer_range
super().__init__(**A )
class __snake_case ( lowerCamelCase__ ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return 2E-3
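# Hedged usage sketch: how an ONNX export config like the one above is typically
# consumed. The readable class names (PoolFormerConfig / PoolFormerOnnxConfig)
# are assumptions taken from the un-obfuscated transformers sources:
#
#   config = PoolFormerConfig()
#   onnx_config = PoolFormerOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict mapping pixel_values -> dynamic axes
#   onnx_config.atol_for_validation  # 2e-3, tolerance used when validating the export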
| 356 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
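# Invocation sketch built only from the flags defined above; the file paths and
# the teacher checkpoint are placeholders:
#
#   python train.py \
#       --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --token_counts token_counts.pkl --data_file binarized_data.pkl \
#       --dump_path ./serialization_dir --force
#
#   (--alpha_clm 0.0 must be passed explicitly with --mlm, per sanity_checks above)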
| 293 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> List[str]:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=UpperCamelCase__ , )
assert hasattr(self , "env" )
def _lowercase ( self , UpperCamelCase__ ) -> Dict:
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"enabled": True,
"processes_per_host": 8,
}
lowerCamelCase : Union[str, Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCamelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCamelCase : Tuple = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version="py36" , )
def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]:
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
# create estimator
lowerCamelCase : Optional[Any] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCamelCase__ )
| 48 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Tuple = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "roc_bert"
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Tuple=3_0_5_2_2 , SCREAMING_SNAKE_CASE :List[str]=7_6_8 , SCREAMING_SNAKE_CASE :Dict=1_2 , SCREAMING_SNAKE_CASE :List[str]=1_2 , SCREAMING_SNAKE_CASE :Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE :List[Any]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :int=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Optional[int]="absolute" , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :Optional[Any]=9_1_0 , SCREAMING_SNAKE_CASE :Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE :str=2_4_8_5_8 , SCREAMING_SNAKE_CASE :List[Any]=True , **SCREAMING_SNAKE_CASE :Tuple , ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =vocab_size
_a : List[str] =max_position_embeddings
_a : Optional[Any] =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[str] =num_attention_heads
_a : int =intermediate_size
_a : Any =hidden_act
_a : Dict =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : str =initializer_range
_a : Optional[int] =type_vocab_size
_a : Any =layer_norm_eps
_a : Any =use_cache
_a : Optional[int] =enable_pronunciation
_a : Optional[Any] =enable_shape
_a : Optional[Any] =pronunciation_embed_dim
_a : Tuple =pronunciation_vocab_size
_a : Union[str, Any] =shape_embed_dim
_a : Any =shape_vocab_size
_a : Tuple =concat_input
_a : List[str] =position_embedding_type
_a : List[str] =classifier_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 276 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : List[str] = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
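# Usage note (sketch): because of _LazyModule, an import such as
#   from transformers.models.xlm_roberta import XLMRobertaModel
# resolves lazily -- the torch/tf/flax submodules above are only imported on
# first attribute access, not when the package itself is imported.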
| 208 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Optional[Any] ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
lowerCamelCase_ = {'source': 'What is love ?', 'target': 'life'}
lowerCamelCase_ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase_ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(A_ , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(A_ )
def a__ ( self : Optional[Any] , A_ : int , A_ : str = "pytorch" ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = os.path.join(A_ , 'output' )
lowerCamelCase_ = os.path.join(A_ , 'data' )
self._create_dummy_data(data_dir=A_ )
lowerCamelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCamelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(A_ , env=self.get_env() )
lowerCamelCase_ = os.path.join(A_ , 'metrics.json' )
with open(A_ ) as f:
lowerCamelCase_ = json.load(A_ )
return result
@require_torch_gpu
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 208 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] ) -> bool:
"""simple docstring"""
return len(set(__magic_name__ ) ) == len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class __A ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = (16, 32, 96, 256)
lowerCAmelCase_ = jnp.floataa
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase__ = self.block_out_channels[i]
lowerCamelCase__ = self.block_out_channels[i + 1]
lowerCamelCase__ = nn.Conv(
__lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCAmelCase )
lowerCamelCase__ = nn.Conv(
__lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCAmelCase )
lowerCamelCase__ = blocks
lowerCamelCase__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.conv_in(__lowerCAmelCase )
lowerCamelCase__ = nn.silu(__lowerCAmelCase )
for block in self.blocks:
lowerCamelCase__ = block(__lowerCAmelCase )
lowerCamelCase__ = nn.silu(__lowerCAmelCase )
lowerCamelCase__ = self.conv_out(__lowerCAmelCase )
return embedding
@flax_register_to_config
class __A ( nn.Module , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 32
lowerCAmelCase_ = 4
lowerCAmelCase_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase_ = False
lowerCAmelCase_ = (320, 640, 1280, 1280)
lowerCAmelCase_ = 2
lowerCAmelCase_ = 8
lowerCAmelCase_ = None
lowerCAmelCase_ = 1280
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = False
lowerCAmelCase_ = jnp.floataa
lowerCAmelCase_ = True
lowerCAmelCase_ = 0
lowerCAmelCase_ = "rgb"
lowerCAmelCase_ = (16, 32, 96, 256)
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase__ = jnp.zeros(__lowerCAmelCase , dtype=jnp.floataa )
lowerCamelCase__ = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase__ = jnp.zeros(__lowerCAmelCase , dtype=jnp.floataa )
lowerCamelCase__ , lowerCamelCase__ = jax.random.split(__lowerCAmelCase )
lowerCamelCase__ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )["params"]
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.block_out_channels
lowerCamelCase__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase__ = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase__ = FlaxTimestepEmbedding(__lowerCAmelCase , dtype=self.dtype )
lowerCamelCase__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCamelCase__ = self.only_cross_attention
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = block_out_channels[0]
lowerCamelCase__ = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase__ = output_channel
lowerCamelCase__ = block_out_channels[i]
lowerCamelCase__ = i == len(__lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase__ = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCamelCase__ = FlaxDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCAmelCase )
for _ in range(self.layers_per_block ):
lowerCamelCase__ = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCAmelCase )
if not is_final_block:
lowerCamelCase__ = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCAmelCase )
lowerCamelCase__ = down_blocks
lowerCamelCase__ = controlnet_down_blocks
# mid
lowerCamelCase__ = block_out_channels[-1]
lowerCamelCase__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCamelCase__ = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = False , ):
'''simple docstring'''
lowerCamelCase__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase__ = jnp.flip(__lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(__lowerCAmelCase , jnp.ndarray ):
lowerCamelCase__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ = jnp.expand_dims(__lowerCAmelCase , 0 )
lowerCamelCase__ = self.time_proj(__lowerCAmelCase )
lowerCamelCase__ = self.time_embedding(__lowerCAmelCase )
# 2. pre-process
lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
lowerCamelCase__ = self.conv_in(__lowerCAmelCase )
lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
lowerCamelCase__ = self.controlnet_cond_embedding(__lowerCAmelCase )
sample += controlnet_cond
# 3. down
lowerCamelCase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = down_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ = down_block(__lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase__ = self.mid_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
# 5. controlnet blocks
lowerCamelCase__ = ()
for down_block_res_sample, controlnet_block in zip(__lowerCAmelCase , self.controlnet_down_blocks ):
lowerCamelCase__ = controlnet_block(__lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ = controlnet_down_block_res_samples
lowerCamelCase__ = self.controlnet_mid_block(__lowerCAmelCase )
# 6. scaling
lowerCamelCase__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCAmelCase , mid_block_res_sample=__lowerCAmelCase )
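# Hedged sketch: initializing the ControlNet above with a PRNG key. The public
# class name FlaxControlNetModel is an assumption from diffusers (the class was
# renamed by obfuscation); init_weights corresponds to the init method defined above:
#
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32, attention_head_dim=8)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))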
| 209 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( lowerCamelCase_ ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : str):
'''simple docstring'''
with open(__snake_case , encoding='''utf-8''') as input_file:
SCREAMING_SNAKE_CASE_ : str = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
SCREAMING_SNAKE_CASE_ : List[str] = input_file.read()
SCREAMING_SNAKE_CASE_ : List[Any] = regexp.search(__snake_case)
return match
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : str):
'''simple docstring'''
with open(__snake_case , encoding='''utf-8''') as input_file:
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL)
SCREAMING_SNAKE_CASE_ : Any = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
SCREAMING_SNAKE_CASE_ : Dict = regexp.finditer(__snake_case)
SCREAMING_SNAKE_CASE_ : int = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = Path('''./datasets''')
SCREAMING_SNAKE_CASE_ : str = list(dataset_paths.absolute().glob('''**/*.py'''))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__snake_case)):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}')
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = Path('''./datasets''')
SCREAMING_SNAKE_CASE_ : str = list(dataset_paths.absolute().glob('''**/*.py'''))
for dataset in dataset_files:
if self._no_print_statements(str(__snake_case)):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.')
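# Illustration of what the first regex above flags (comments only):
#   with open(path) as f:                    -> flagged: no explicit encoding
#   with open(path, encoding="utf-8") as f:  -> not flagged
#   with open(path, "rb") as f:              -> not flagged: binary/write modes sit
#                                               in the negative lookahead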
| 371 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : Optional[int] = False
return options
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''')
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : int = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[str] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = {}
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Dict = {}
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : float ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(UpperCamelCase )
if nodea not in self.connections:
self.add_node(UpperCamelCase )
__UpperCAmelCase : Any = probability
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return list(self.connections )
def lowerCamelCase__ ( self : Any , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : str = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : list[tuple[str, str, float]] , _UpperCamelCase : int ) -> dict[str, int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Optional[Any] = Counter(graph.get_nodes() )
__UpperCAmelCase : Tuple = start
for _ in range(_UpperCamelCase ):
__UpperCAmelCase : str = graph.transition(_UpperCamelCase )
visited[node] += 1
return visited
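# Worked example (the readable function name is an assumption -- the obfuscated
# definition above corresponds to get_transitions in the original source, and the
# class is referenced above as MarkovChainGraphUndirectedUnweighted):
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                  ("b", "a", 0.5), ("b", "b", 0.5)]
#   counts = get_transitions("a", transitions, 10_000)
#   # The stationary distribution of this chain is (5/6, 1/6), so counts["a"]
#   # should come out roughly five times counts["b"] on a long walk.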
if __name__ == "__main__":
import doctest
doctest.testmod()
| 115 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    """Tool that translates text between the languages listed in LANGUAGE_CODES."""
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
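For context, a sketch of the same translation flow through the public transformers API instead of the tool wrapper above; the French example sentence is an assumption:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, src_lang="fra_Latn")  # French source
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
inputs = tokenizer("La vie est belle.", return_tensors="pt")
# Force the decoder to start with the target-language code (English here).
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("eng_Latn"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])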
| 115 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, ... (i.e. F(2), F(3), ...)."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
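As a closed-form cross-check on the loop above, Binet's formula shows the digit count of F_k grows linearly in k:

F_k \approx \frac{\varphi^{k}}{\sqrt{5}}, \qquad \varphi = \frac{1 + \sqrt{5}}{2}, \qquad
\operatorname{digits}(F_k) = \left\lfloor k \log_{10}\varphi - \log_{10}\sqrt{5} \right\rfloor + 1 .

For n = 1000 this predicts index 4782, which is also what solution(1000) returns.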
| 62 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
    ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, os.PathLike] , __magic_name__ : Optional[Union[str, os.PathLike]] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : Optional[Dict[str, str]] = None , __magic_name__ : Optional[Union[bool, str]] = None , __magic_name__ : Optional[str] = None , __magic_name__ : bool = False , **__magic_name__ : Any , ) -> Dict:
"""simple docstring"""
UpperCamelCase :Dict = get_file_from_repo(
__magic_name__ , __magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , resume_download=__magic_name__ , proxies=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , local_files_only=__magic_name__ , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(__magic_name__ , encoding="""utf-8""" ) as reader:
return json.load(__magic_name__ )
class _SCREAMING_SNAKE_CASE :
def __init__( self : Any ):
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase )
def _A ( cls : List[str] , __lowerCamelCase : List[Any] , **__lowerCamelCase : int ):
UpperCamelCase :Optional[Any] = kwargs.pop("""config""" , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = kwargs.pop("""trust_remote_code""" , __lowerCamelCase )
UpperCamelCase :Any = True
UpperCamelCase , UpperCamelCase :int = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase :Union[str, Any] = config_dict.get("""image_processor_type""" , __lowerCamelCase )
UpperCamelCase :int = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
UpperCamelCase :Optional[Any] = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCamelCase :Optional[int] = config_dict.pop("""feature_extractor_type""" , __lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
UpperCamelCase :str = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
UpperCamelCase :Any = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
UpperCamelCase :Dict = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.image_processor_type``
UpperCamelCase :Optional[Any] = getattr(__lowerCamelCase , """image_processor_type""" , __lowerCamelCase )
if hasattr(__lowerCamelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
UpperCamelCase :Any = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
UpperCamelCase :Tuple = image_processor_class_from_name(__lowerCamelCase )
UpperCamelCase :List[Any] = image_processor_auto_map is not None
UpperCamelCase :Any = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
UpperCamelCase :Optional[int] = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
UpperCamelCase :Optional[int] = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
UpperCamelCase :int = kwargs.pop("""code_revision""" , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
UpperCamelCase :int = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )]
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def _A ( __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ):
IMAGE_PROCESSOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase )
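A short usage sketch of the auto class defined above, as exposed by transformers (the checkpoint name is just an example):

from transformers import AutoImageProcessor

# Resolves through the mapping above: model_type "vit" -> ViTImageProcessor.
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(processor).__name__)  # ViTImageProcessor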
| 62 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__snake_case : Tuple = False
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self , _lowercase=32 ) -> Tuple:
set_seed(0 )
a_ : str = UNetaDModel(sample_size=_lowercase , in_channels=3 , out_channels=3 )
a_ : Dict = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1 )
return model, optimizer
@slow
def UpperCamelCase__ ( self ) -> List[str]:
a_ : int = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
a_ : Optional[int] = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="""linear""" , clip_sample=_lowercase , )
a_ : Dict = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="""linear""" , clip_sample=_lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
a_ : Optional[Any] = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_lowercase ) for _ in range(4 )]
a_ : Dict = [torch.randn((4, 3, 32, 32) ).to(_lowercase ) for _ in range(4 )]
a_ : Dict = [torch.randint(0 , 1_000 , (4,) ).long().to(_lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
a_ , a_ : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(_lowercase )
for i in range(4 ):
optimizer.zero_grad()
a_ : Any = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
a_ : List[Any] = model(_lowercase , timesteps[i] ).sample
a_ : Optional[int] = torch.nn.functional.mse_loss(_lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
a_ , a_ : Any = self.get_model_optimizer(resolution=32 )
model.train().to(_lowercase )
for i in range(4 ):
optimizer.zero_grad()
a_ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
a_ : Tuple = model(_lowercase , timesteps[i] ).sample
a_ : List[Any] = torch.nn.functional.mse_loss(_lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
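The equality asserted above holds because both schedulers implement the same closed-form forward (noising) process in `add_noise`; with identical betas they produce identical noisy samples and hence identical training losses:

x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon,
\qquad \bar{\alpha}_t = \prod_{s=1}^{t} (1 - \beta_s),
\qquad \epsilon \sim \mathcal{N}(0, I).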
| 248 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Union[str, Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a__ , a__ , a__ , a__):
'''simple docstring'''
a_ : Tuple = original_name.split(""".""")[0]
a_ : List[Any] = key.split(""".""")
a_ : List[Any] = int(key_list[key_list.index(a__) - 2])
a_ : Dict = int(key_list[key_list.index(a__) - 1])
a_ : Any = orig_block_num - offset
a_ : Optional[int] = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''')
return key
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : List[str] = OrderedDict()
a_ , a_ : Optional[int] = 0, 0
for key, value in state_dict.items():
if key.startswith("""network"""):
a_ : str = key.replace("""network""" , """poolformer.encoder""")
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""") and "patch_embed" not in key:
patch_emb_offset += 1
a_ : Tuple = key[: key.find("""proj""")]
a_ : Dict = key.replace(a__ , f'''patch_embeddings.{total_embed_found}.''')
a_ : Optional[Any] = key.replace("""proj""" , """projection""")
if key.endswith("""bias"""):
total_embed_found += 1
if "patch_embeddings" in key:
a_ : int = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
a_ : Union[str, Any] = replace_key_with_offset(a__ , a__ , """mlp.fc1""" , """output.conv1""")
if "mlp.fc2" in key:
a_ : str = replace_key_with_offset(a__ , a__ , """mlp.fc2""" , """output.conv2""")
if "norm1" in key:
a_ : str = replace_key_with_offset(a__ , a__ , """norm1""" , """before_norm""")
if "norm2" in key:
a_ : Any = replace_key_with_offset(a__ , a__ , """norm2""" , """after_norm""")
if "layer_scale_1" in key:
a_ : List[Any] = replace_key_with_offset(a__ , a__ , """layer_scale_1""" , """layer_scale_1""")
if "layer_scale_2" in key:
a_ : Optional[Any] = replace_key_with_offset(a__ , a__ , """layer_scale_2""" , """layer_scale_2""")
if "head" in key:
a_ : Optional[Any] = key.replace("""head""" , """classifier""")
a_ : Union[str, Any] = value
return new_state_dict
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
a_ : Any = Image.open(requests.get(a__ , stream=a__).raw)
return image
@torch.no_grad()
def _UpperCAmelCase ( a__ , a__ , a__):
'''simple docstring'''
a_ : str = PoolFormerConfig()
# set attributes based on model_name
a_ : Union[str, Any] = """huggingface/label-files"""
a_ : str = model_name[-3:]
a_ : Tuple = 1_0_0_0
a_ : List[str] = """imagenet-1k-id2label.json"""
a_ : Any = (1, 1_0_0_0)
# set config attributes
a_ : Optional[Any] = json.load(open(hf_hub_download(a__ , a__ , repo_type="""dataset""") , """r"""))
a_ : List[Any] = {int(a__): v for k, v in idalabel.items()}
a_ : Tuple = idalabel
a_ : int = {v: k for k, v in idalabel.items()}
if size == "s12":
a_ : Optional[int] = [2, 2, 6, 2]
a_ : str = [6_4, 1_2_8, 3_2_0, 5_1_2]
a_ : List[Any] = 4.0
a_ : Tuple = 0.9
elif size == "s24":
a_ : List[Any] = [4, 4, 1_2, 4]
a_ : str = [6_4, 1_2_8, 3_2_0, 5_1_2]
a_ : List[Any] = 4.0
a_ : Optional[Any] = 0.9
elif size == "s36":
a_ : str = [6, 6, 1_8, 6]
a_ : Dict = [6_4, 1_2_8, 3_2_0, 5_1_2]
a_ : Optional[int] = 4.0
a_ : Optional[int] = 1e-6
a_ : Tuple = 0.9
elif size == "m36":
a_ : str = [6, 6, 1_8, 6]
a_ : List[Any] = [9_6, 1_9_2, 3_8_4, 7_6_8]
a_ : str = 4.0
a_ : Union[str, Any] = 1e-6
a_ : str = 0.95
elif size == "m48":
a_ : List[Any] = [8, 8, 2_4, 8]
a_ : Dict = [9_6, 1_9_2, 3_8_4, 7_6_8]
a_ : int = 4.0
a_ : int = 1e-6
a_ : List[Any] = 0.95
else:
raise ValueError(f'''Size {size} not supported''')
# load image processor
a_ : Tuple = PoolFormerImageProcessor(crop_pct=a__)
# Prepare image
a_ : List[Any] = prepare_img()
a_ : List[str] = image_processor(images=a__ , return_tensors="""pt""").pixel_values
logger.info(f'''Converting model {model_name}...''')
# load original state dict
a_ : List[str] = torch.load(a__ , map_location=torch.device("""cpu"""))
# rename keys
a_ : List[Any] = rename_keys(a__)
# create HuggingFace model and load state dict
a_ : List[str] = PoolFormerForImageClassification(a__)
model.load_state_dict(a__)
model.eval()
# Define image processor
a_ : Tuple = PoolFormerImageProcessor(crop_pct=a__)
a_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""").pixel_values
# forward pass
a_ : Any = model(a__)
a_ : Any = outputs.logits
# define expected logit slices for different models
if size == "s12":
a_ : Union[str, Any] = torch.tensor([-0.3045, -0.6758, -0.4869])
elif size == "s24":
a_ : Optional[Any] = torch.tensor([0.4402, -0.1374, -0.8045])
elif size == "s36":
a_ : int = torch.tensor([-0.6080, -0.5133, -0.5898])
elif size == "m36":
a_ : List[str] = torch.tensor([0.3952, 0.2263, -1.2668])
elif size == "m48":
a_ : Union[str, Any] = torch.tensor([0.1167, -0.0656, -0.3423])
else:
raise ValueError(f'''Size {size} not supported''')
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2)
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
Path(a__).mkdir(exist_ok=a__)
model.save_pretrained(a__)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(a__)
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__snake_case : Optional[int] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
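A hypothetical invocation of the conversion script above; the script file name and the checkpoint path are placeholders, not taken from the original:

python convert_poolformer_checkpoint.py \
    --model_name poolformer_s12 \
    --checkpoint_path /path/to/poolformer_s12.pth.tar \
    --pytorch_dump_folder_path ./poolformer-s12-hf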
| 248 | 1 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)  # reduce theta into [0, 2*pi) before summing
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
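Both functions above truncate the standard Maclaurin expansions after `accuracy` terms, after first reducing theta modulo 2π for numerical stability:

\sin\theta = \sum_{r=0}^{\infty} \frac{(-1)^{r}\,\theta^{2r+1}}{(2r+1)!},
\qquad
\cos\theta = \sum_{r=0}^{\infty} \frac{(-1)^{r}\,\theta^{2r}}{(2r)!} .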
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in the conversion script (even though it is not used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
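A small self-contained check of the multi-line `getattr` pattern used above; the attribute name `hidden_size` is just an example:

import re

sample = 'getattr(\n    self.config, "hidden_size", None\n)'
pattern = r"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"hidden_size\""
assert re.search(pattern, sample) is not None  # the whitespace classes span the line break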
| 7 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any video-classification model checkpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
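A hedged usage sketch via the high-level pipeline factory; the model id and file name are examples, and decord must be installed:

from transformers import pipeline

video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
# Accepts a local path or an http(s) URL, as handled in `preprocess` above.
predictions = video_classifier("video.mp4", top_k=3, frame_sampling_rate=4)
print(predictions)  # [{"score": ..., "label": ...}, ...]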
| 231 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __a , unittest.TestCase ):
_lowercase =CLIPTokenizer
_lowercase =CLIPTokenizerFast
_lowercase =True
_lowercase ={}
_lowercase =False
def __a ( self ) -> Dict:
super().setUp()
# fmt: off
lowerCAmelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCamelCase ) )
def __a ( self , **_UpperCamelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , **_UpperCamelCase ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
lowerCAmelCase_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@require_ftfy
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase_ = "xa\u0303y" + " " + "x\xe3y"
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
lowerCAmelCase_ = f""" {text}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
def __a ( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __a ( self ) -> str:
super().test_tokenization_python_rust_equals()
def __a ( self ) -> Any:
# CLIP always lower cases letters
pass
| 231 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    '''Return True if `number` is prime.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    '''Yield the primes in increasing order.'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    '''Project Euler 10: return the sum of all primes below `n`.'''
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'''{solution() = }''')
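The generator above re-tests each candidate in O(√n) time; for n = 2,000,000 a sieve of Eratosthenes is the usual faster alternative. A minimal sketch (not the file's own method):

def solution_sieve(n: int = 2_000_000) -> int:
    """Sum all primes below `n` with a boolean sieve."""
    sieve = [True] * n
    sieve[0:2] = [False, False]  # 0 and 1 are not prime
    for i in range(2, int(n**0.5) + 1):
        if sieve[i]:
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i])
    return sum(i for i, is_p in enumerate(sieve) if is_p)

# Both approaches agree: solution() == solution_sieve() == 142913828922.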
| 104 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
a_ : str = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
def __init__( self , __magic_name__=7_68 , __magic_name__=30_72 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=2_24 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.0_2 , **__magic_name__ , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = num_channels
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
@classmethod
def __UpperCAmelCase ( cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
_a , _a = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
_a = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class GitConfig(PretrainedConfig):
    model_type = "git"
def __init__( self , __magic_name__=None , __magic_name__=3_05_22 , __magic_name__=7_68 , __magic_name__=6 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10_24 , __magic_name__=0.0_2 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=False , __magic_name__=1_01 , __magic_name__=1_02 , __magic_name__=None , **__magic_name__ , ) -> Optional[int]:
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , pad_token_id=__magic_name__ , **__magic_name__ )
if vision_config is None:
_a = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
_a = GitVisionConfig(**__magic_name__ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = tie_word_embeddings
_a = num_image_with_embedding
_a = bos_token_id
_a = eos_token_id
def __UpperCAmelCase ( self ) -> List[str]:
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.__class__.model_type
return output
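A brief instantiation sketch; the defaults mirror microsoft/git-base, and transformers exposes these classes directly:

from transformers import GitConfig

config = GitConfig()  # vision_config is auto-filled with GitVisionConfig defaults
print(config.vision_config.image_size)  # 224
print(config.model_type)                # git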
| 104 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( lowercase , unittest.TestCase ):
UpperCAmelCase : int = XLMTokenizer
UpperCAmelCase : Union[str, Any] = False
def _lowercase (self : Union[str, Any]) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__snake_case : Dict = dict(zip(_A , range(len(_A))))
__snake_case : str = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w') as fp:
fp.write(json.dumps(_A))
with open(self.merges_file , 'w') as fp:
fp.write('\n'.join(_A))
def _lowercase (self : int , _A : List[str]) -> Dict:
__snake_case : str = 'lower newer'
__snake_case : List[str] = 'lower newer'
return input_text, output_text
def _lowercase (self : Union[str, Any]) -> int:
__snake_case : Union[str, Any] = XLMTokenizer(self.vocab_file , self.merges_file)
__snake_case : str = 'lower'
__snake_case : List[str] = ['low', 'er</w>']
__snake_case : str = tokenizer.tokenize(_A)
self.assertListEqual(_A , _A)
__snake_case : str = tokens + ['<unk>']
__snake_case : Union[str, Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , _A)
@slow
def _lowercase (self : Dict) -> Optional[int]:
__snake_case : Optional[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
__snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=_A)
__snake_case : Tuple = tokenizer.encode('multi-sequence build' , add_special_tokens=_A)
__snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(_A)
__snake_case : int = tokenizer.build_inputs_with_special_tokens(_A , _A)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
 | 172 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    '''Euclid's algorithm for the greatest common divisor.'''
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    '''Return the multiplicative inverse of `a` modulo `m` via the extended Euclidean algorithm.'''
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
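The loop above is the extended Euclidean algorithm: it maintains Bezout coefficients, so on exit u1 * a ≡ gcd(a, m) ≡ 1 (mod m). A quick worked check (values chosen for illustration):

assert gcd(3, 26) == 1
assert find_mod_inverse(3, 26) == 9  # 3 * 9 = 27 ≡ 1 (mod 26)
assert (7 * find_mod_inverse(7, 31)) % 31 == 1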
| 172 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    '''Tool wrapping an NLI model (bart-large-mnli) for zero-shot label classification.'''
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )
    def forward(self, inputs):
        outputs = self.model(**inputs)
        return outputs
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
 | 256 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 256 | 1 |
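# Editor's note: illustrative sketch (not from the source) of how the extension table
# above can drive builder-module inference for a data file:
#
#     import os
#     suffix = os.path.splitext("train.csv")[1]          # ".csv"
#     module_name, builder_kwargs = _EXTENSION_TO_MODULE[suffix]
#     assert module_name == "csv" and builder_kwargs == {}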
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    """simple docstring"""
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )
    def test_sharding_strategy(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        '''simple docstring'''
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    """simple docstring"""
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3_2_0_0,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_0_0_0,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1_9_0_0,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 1_6_0
        self.n_val = 1_6_0
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        '''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        '''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        '''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
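# Editor's note: minimal standalone sketch (environment-variable names mirror the plugin
# tests above; the exact combination shown is an assumption) of configuring FSDP purely
# through the environment:
#
#     with mockenv_context(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1",
#                          FSDP_OFFLOAD_PARAMS="false", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"):
#         plugin = FullyShardedDataParallelPlugin()  # reads its fields from the env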
| 311 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
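# Editor's note: a minimal sketch (illustrative, not the transformers implementation) of
# the lazy-module pattern used above, where torch-dependent submodules are only imported
# the first time one of their attributes is accessed:
#
#     import importlib, types
#
#     class _LazySketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)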
| 311 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 5_12,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + self.offset
    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ) -> int:
        '''simple docstring'''
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token(self , index ) -> str:
        '''simple docstring'''
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string(self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add(self , pair=False ) -> int:
        '''simple docstring'''
        return 1
    def _special_token_mask(self , seq ) -> List[int]:
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
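# Editor's note: quick sketch of the id-offset scheme above; the first `offset` ids are
# reserved for special tokens, so a sentencepiece piece id p maps to tokenizer id p + offset:
#
#     tokenizer.convert_tokens_to_ids("<pad>")    # -> 0
#     tokenizer.convert_tokens_to_ids("</s>")     # -> 1
#     tokenizer.convert_tokens_to_ids("<mask_1>") # -> 2
#     tokenizer.convert_tokens_to_ids("<mask_2>") # -> 3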
| 351 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    '''debug''': logging.DEBUG,
    '''info''': logging.INFO,
    '''warning''': logging.WARNING,
    '''error''': logging.ERROR,
    '''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level() -> int:
    '''simple docstring'''
    env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level
def _get_library_name() -> str:
    '''simple docstring'''
    return __name__.split('''.''' )[0]
def _get_library_root_logger() -> logging.Logger:
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    '''simple docstring'''
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    '''simple docstring'''
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    '''simple docstring'''
    return log_levels
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity() -> int:
    '''simple docstring'''
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info() -> None:
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning() -> None:
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug() -> None:
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error() -> None:
    '''simple docstring'''
    return set_verbosity(ERROR )
def disable_default_handler() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler(handler: logging.Handler ) -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler(handler: logging.Handler ) -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    '''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
        handler.setFormatter(formatter )
def reset_format() -> None:
    '''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice(self , *args , **kwargs ) -> None:
    '''simple docstring'''
    no_advisory_warnings = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ) -> None:
    '''simple docstring'''
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__( self , *args , **kwargs ): # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
    def __iter__( self ):
        '''simple docstring'''
        return iter(self._iterator )
    def __getattr__( self , attr ):
        '''simple docstring'''
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        '''simple docstring'''
        return self
    def __exit__( self , type_ , value , traceback ):
        '''simple docstring'''
        return
class _tqdm_cls:
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars() -> None:
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
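# Editor's note: typical usage sketch of the verbosity helpers above (assumes the module is
# importable as `transformers.utils.logging`, where this file lives upstream):
#
#     from transformers.utils import logging
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("now visible at INFO level")
#     logger.warning_once("emitted a single time thanks to the lru_cache above")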
| 78 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x: int ) -> int:
    '''Converts a byte count to whole mebibytes.'''
    return int(x / 2**2_0 )
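# Editor's note: bamb truncates a byte count to whole mebibytes, e.g.:
#
#     assert bamb(1 * 2**20) == 1 and bamb(3 * 2**20 + 5) == 3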
class TorchTracemalloc:
    """simple docstring"""

    def __enter__( self ):
        '''simple docstring'''
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ):
        '''simple docstring'''
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator , batch_size: int = 1_6 , model_name: str = "bert-base-cased" , n_train: int = 3_2_0 , n_val: int = 1_6_0 , ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        """glue""" , """mrpc""" , split={"""train""": f'''train[:{n_train}]''', """validation""": f'''validation[:{n_val}]'''} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
        accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
        accelerator.print(
            """Total Peak Memory consumed during the train (max): {}""".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'''epoch-{epoch}'''] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--peak_memory_upper_bound""" , type=float , default=None , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
    parser.add_argument(
        """--n_train""" , type=int , default=3_2_0 , help="""Number of training examples to use.""" , )
    parser.add_argument(
        """--n_val""" , type=int , default=1_6_0 , help="""Number of validation examples to use.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=1 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
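# Editor's note: hedged invocation sketch (flag names are taken from the argparse setup
# above; the launcher incantation itself is an assumption):
#
#     accelerate launch test_peak_memory_usage.py \
#         --model_name_or_path bert-base-cased --n_train 320 --n_val 160 \
#         --peak_memory_upper_bound 1900 --output_dir .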
| 115 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = """facebook/nllb-200-distilled-600M"""
    description = (
        """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
        """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
        """which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in """
        """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
    )
    name = """translator"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["""text""", """text""", """text"""]
    outputs = ["""text"""]
    def encode( self , text , src_lang , tgt_lang ):
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="""pt""" , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
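# Editor's note: a hedged usage sketch for the tool above (checkpoint download required;
# the bare constructor call is an assumption based on PipelineTool's usual interface):
#
#     translator = TranslationTool()
#     translator("C'est super", src_lang="French", tgt_lang="English")
#     # -> the English translation of the input text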
| 115 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test( self , object_detector , examples ):
        outputs = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                } , )
        import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        batch = [
            Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            # RGBA
            dataset[0]['''file'''],
            # LA
            dataset[1]['''file'''],
            # L
            dataset[2]['''file'''],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )
        self.assertEqual(len(batch ) , len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        '''score''': ANY(float ),
                        '''label''': ANY(str ),
                        '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                    } , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
    def test_small_model_tf( self ):
pass
@require_torch
    def test_small_model_pt( self ):
        model_id = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt( self ):
        model_id = '''facebook/detr-resnet-50'''
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection( self ):
        model_id = '''facebook/detr-resnet-50'''
        object_detector = pipeline('''object-detection''' , model=model_id )
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
    def test_threshold( self ):
        threshold = 0.9985
        model_id = '''facebook/detr-resnet-50'''
        object_detector = pipeline('''object-detection''' , model=model_id )
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm( self ):
        model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
        threshold = 0.9993
        object_detector = pipeline('''object-detection''' , model=model_id , threshold=threshold )
        outputs = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}
    def __init__( self , vocab_size=50_277 , context_length=1_024 , hidden_size=4_096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
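# Editor's note: usage sketch for the config above:
#
#     config = RwkvConfig(context_length=2048)
#     assert config.max_position_embeddings == 2048  # resolved via the attribute_map alias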
| 11 | 1 |
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    """simple docstring"""
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
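# Editor's note: illustrative calls for the recursive search above; each call checks both
# ends of the remaining window, so the scan closes in from both sides at once:
#
#     assert search([1, 3, 5, 7, 9], 7) == 3
#     assert search([1, 3, 5, 7, 9], 4) == -1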
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
def get_set_bits_count( number: int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
return count
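# Editor's note: quick checks for the Brian Kernighan popcount above:
#
#     assert get_set_bits_count(0b1101) == 3
#     assert get_set_bits_count(0) == 0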
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name: str ) -> str:
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
    if "mask_token" in name:
        name = name.replace("""mask_token""" , """decoder.mask_token""" )
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """vit.encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
            if "weight" in key:
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
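
# Toy illustration (not from the source) of the fused-QKV split performed above:
# a (3 * dim, dim) weight matrix is cut into three (dim, dim) blocks for query,
# key and value.
_dim = 2
_fused = torch.arange(3 * _dim * _dim).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _fused)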
def convert_vit_mae_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1E-4 )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 129 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline ( Pipeline ):
    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet
    def __call__(self , audios: Union[np.ndarray, bytes, str] , **kwargs ):
        return super().__call__(audios , **kwargs )
    def _sanitize_parameters(self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess(self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
    def _forward(self , model_inputs ):
        candidate_labels = model_inputs.pop("""candidate_labels""" )
        text_inputs = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self , model_outputs ):
        candidate_labels = model_outputs.pop("""candidate_labels""" )
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""" )
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
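
# Hypothetical end-user call for the pipeline implemented above (the checkpoint name
# is an example CLAP-style model, not taken from this file):
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])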
| 129 | 1 |
def multiplication_table( number: int , number_of_terms: int ) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=1_0))
| 338 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase = self.framework
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_inputs['''input_ids''']
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs['''input_ids'''][0]
lowerCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
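
# Illustrative end-user call for a fill-mask pipeline like the one implemented above
# (the checkpoint name is an example, not taken from this file):
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
# for pred in unmasker("The capital of France is [MASK].", top_k=3):
#     print(pred["token_str"], round(pred["score"], 4))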
| 338 | 1 |
'''simple docstring'''
import string
def decrypt( message: str ) -> None:
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main() -> None:
    '''simple docstring'''
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 354 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Union[str, Any] = get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> Tuple:
'''simple docstring'''
os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase )
with FSDP.state_dict_type(
__UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
snake_case_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F"Saving model to {output_model_file}" )
torch.save(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Saving model to {output_model_file}" )
torch.save(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = os.path.join(__UpperCAmelCase, F"{MODEL_NAME}_{model_index}" )
os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase )
logger.info(F"Saving model to {ckpt_dir}" )
snake_case_ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=__UpperCAmelCase, storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ), planner=DefaultSavePlanner(), )
logger.info(F"Model saved to {ckpt_dir}" )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> str:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
snake_case_ = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Loading model from {input_model_file}" )
snake_case_ = torch.load(__UpperCAmelCase )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Loading model from {input_model_file}" )
snake_case_ = torch.load(__UpperCAmelCase )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = (
os.path.join(__UpperCAmelCase, F"{MODEL_NAME}_{model_index}" )
if F"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading model from {ckpt_dir}" )
snake_case_ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__UpperCAmelCase, storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ), planner=DefaultLoadPlanner(), )
snake_case_ = state_dict['''model''']
logger.info(F"Model loaded from {ckpt_dir}" )
model.load_state_dict(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> Dict:
'''simple docstring'''
os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase )
with FSDP.state_dict_type(
__UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
snake_case_ = FSDP.optim_state_dict(__UpperCAmelCase, __UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case_ = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Saving Optimizer state to {output_optimizer_file}" )
torch.save(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Optimizer state saved in {output_optimizer_file}" )
else:
snake_case_ = os.path.join(__UpperCAmelCase, F"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase )
logger.info(F"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ), planner=DefaultSavePlanner(), )
logger.info(F"Optimizer state saved in {ckpt_dir}" )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> Union[str, Any]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case_ = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
logger.info(F"Loading Optimizer state from {input_optimizer_file}" )
snake_case_ = torch.load(__UpperCAmelCase )
logger.info(F"Optimizer state loaded from {input_optimizer_file}" )
else:
snake_case_ = (
os.path.join(__UpperCAmelCase, F"{OPTIMIZER_NAME}_{optimizer_index}" )
if F"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading Optimizer from {ckpt_dir}" )
snake_case_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ), )
snake_case_ = optim_state['''optimizer''']
logger.info(F"Optimizer loaded from {ckpt_dir}" )
snake_case_ = FSDP.optim_state_dict_to_load(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
optimizer.load_state_dict(__UpperCAmelCase )
| 72 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence : list ) -> list:
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg )
    return sequence
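
# Quick check (illustrative): values restricted to the colors (0, 1, 2).
assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]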
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by commas:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(F'''{dutch_national_flag_sort(unsorted)}''')
| 326 |
from __future__ import annotations
def all_construct( target : str , word_bank : list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table : list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations : list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase__ = """\
"""
UpperCamelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = 1_6 , _A = True , _A=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = "cuda"
else:
__lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(_A )
__lowerCAmelCase = model.to(_A )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , return_tensors="pt" , return_attention_mask=_A , ).to(_A )
__lowerCAmelCase = encodings["input_ids"]
__lowerCAmelCase = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(_A ) , _A ) ):
__lowerCAmelCase = min(start_index + batch_size , len(_A ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_A )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_A ), attn_mask] , dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(_A , attention_mask=_A ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_A )}
| 102 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads (self ) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size (self ) -> int:
        """simple docstring"""
        return self.d_model
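
# Illustrative check (not part of the original file): `attribute_map` plus the
# properties above alias `hidden_size` to `d_model` and `num_attention_heads`
# to `encoder_attention_heads`:
# config = TableTransformerConfig()
# assert config.hidden_size == config.d_model == 256
# assert config.num_attention_heads == config.encoder_attention_heads == 8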
class TableTransformerOnnxConfig ( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs (self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def atol_for_validation (self ) -> float:
        """simple docstring"""
        return 1E-5
    @property
    def default_onnx_opset (self ) -> int:
        """simple docstring"""
        return 12
| 211 |
def neville_interpolate( x_points , y_points , x0 ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
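
# Example (the algorithm's usual doctest): points on the line y = x + 5,
# interpolated at x0 = 5, give 10.0.
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0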
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) ->np.ndarray:
    '''simple docstring'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary( gray : np.ndarray ) ->np.ndarray:
    '''simple docstring'''
    return (gray > 127) & (gray <= 255)
def dilation( image : np.ndarray , kernel : np.ndarray ) ->np.ndarray:
    '''simple docstring'''
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (offsets reconstructed to center the image
    # under the kernel window)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
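
# Tiny check (illustrative, not in the original file): dilating a single set pixel
# with the cross-shaped kernel reproduces the cross, given the centered padding above.
_img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
_kern = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert (dilation(_img, _kern) == _kern).all()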
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 362 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vivit"""] = [
        """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VivitModel""",
        """VivitPreTrainedModel""",
        """VivitForVideoClassification""",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
'''simple docstring'''
def is_ip_v4_address_valid( ip_v4_address: str ) -> bool:
    octets = [int(i ) for i in ip_v4_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 2_5_4 for octet in octets )
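
# Quick checks (illustrative); note the implementation above caps octets at 254.
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("192.255.15.8") is False
assert is_ip_v4_address_valid("172.100.0") is False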
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_v4_address_valid(ip) else 'invalid'
    print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 83 |
"""simple docstring"""
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort( start , visited , sort ) ->list:
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
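
# Expected result for the module-level graph above (illustrative): starting from 'a',
# the DFS post-order yields ['c', 'd', 'e', 'b', 'a'].
assert topological_sort('a', [], []) == ['c', 'd', 'e', 'b', 'a']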
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 243 | 0 |
from math import log2
def get_index_of_rightmost_set_bit( number :int ) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(number , float ):
        raise TypeError("""Input value must be a 'int' type""" )
    return 0 if (number == 0) else int(log2(number & -number ) )
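
# Example: 36 = 0b100100 -> lowest set bit has index 2 (36 & -36 == 4, log2(4) == 2).
assert get_index_of_rightmost_set_bit(36) == 2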
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 |
def present_value( discount_rate :float , cash_flows :list[float] ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
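
# Worked example (values invented): three yearly cash flows of 100 at a 5% discount
# rate; the first flow sits at t = 0 in the enumerate() above, so it is undiscounted.
# 100 / 1.05**0 + 100 / 1.05**1 + 100 / 1.05**2 = 285.94 (rounded)
assert present_value(0.05, [100, 100, 100]) == 285.94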
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 1 |
def kinetic_energy( mass , velocity ):
    """simple docstring"""
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
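
# Example: a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5 * 5 = 125 J.
assert kinetic_energy(10, 5) == 125.0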
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 110 |
"""simple docstring"""
import numpy as np
def runge_kutta( f , y0 , x0 , h , x_end ) ->np.ndarray:
    """simple docstring"""
    n = int(np.ceil((x_end - x0) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        k1 = f(x , y[k] )
        k2 = f(x + 0.5 * h , y[k] + 0.5 * h * k1 )
        k3 = f(x + 0.5 * h , y[k] + 0.5 * h * k2 )
        k4 = f(x + h , y[k] + h * k3 )
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
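
# Example (not in the original file): integrate y' = y with y(0) = 1 up to x = 3;
# the exact solution is e**3 ≈ 20.0855, which fourth-order Runge-Kutta matches closely.
def _growth(x, y):
    return y

assert abs(runge_kutta(_growth, 1, 0, 0.01, 3)[-1] - 20.0855) < 1e-2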
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 0 |
def move_tower(height , from_pole , to_pole , with_pole):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole)
        move_disk(from_pole , to_pole)
        move_tower(height - 1 , with_pole , to_pole , from_pole)
def move_disk(fp , tp):
    print('moving disk from' , fp , 'to' , tp)
def main():
    height = int(input('Height of hanoi: ').strip())
    move_tower(height , 'A' , 'B' , 'C')
if __name__ == "__main__":
    main()
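
# Illustrative trace: for a height of 2 the program above prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B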
| 364 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a , cache_dir=a)
SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(a , os.listdir(a)[0] , 'snapshots'))]
SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
assert np.abs(np.abs(a , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(a) == num_samples
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=a , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=a , safety_checker=a , )
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0) , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , use_memory_efficient_attention=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
| 327 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase__ = """src/transformers"""
lowercase__ = """docs/source/en/tasks"""
def _find_text_in_file(filename , start_prompt , end_prompt ):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 96 |
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    # True when num is a perfect square (floating-point check).
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    # True when n is a perfect square, found by binary-searching its root.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
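# Added quick checks (a sketch, not in the original file): both variants
# should agree on small inputs.
assert perfect_square(16) and perfect_square_binary_search(16)
assert not perfect_square(15) and not perfect_square_binary_search(15)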
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 90 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    # Linear search for `target` in array[left:right].
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    # Iterative ternary search over a sorted array.
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    # Recursive ternary search over a sorted array.
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
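# Added quick checks (a sketch, not in the original file): for lists shorter
# than `precision`, both variants fall back to `lin_search`.
_demo = [1, 3, 5, 7, 9]
assert ite_ternary_search(_demo, 5) == 2
assert rec_ternary_search(0, len(_demo) - 1, _demo, 5) == 2
assert ite_ternary_search(_demo, 4) == -1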
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ =input('Enter numbers separated by comma:\n').strip()
lowercase__ =[int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
lowercase__ =int(input('Enter the number to be found in the list:\n').strip())
lowercase__ =ite_ternary_search(collection, target)
lowercase__ =rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"""Iterative search: {target} found at positions: {resulta}""")
print(F"""Recursive search: {target} found at positions: {resulta}""")
else:
print('Not found')
| 90 | 1 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    """simple docstring"""
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Evaluate the polynomial (coefficients ordered from lowest degree) at x.
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    # Evaluate the same polynomial with Horner's method.
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
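    # Added worked example (a sketch, not in the original file): with the sample
    # polynomial above, 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 = 79800.0, so both
    # evaluations should agree up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6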
| 41 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        '''simple docstring'''
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f'{split}.{field}'), 'w') as f:
                    f.write(content)
    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
        if gpus > 0:
            testargs.append(f'--gpus={gpus}')
            if is_apex_available():
                testargs.append('--fp16')
        else:
            testargs.append('--gpus=0')
            testargs.append('--distributed_backend=ddp_cpu')
            testargs.append('--num_processes=2')
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
    @require_torch_multi_gpu
    def test_finetune_multi_gpu(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multi_gpu_ray(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 361 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path, output_path) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, 'rb') as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, trusted_dst):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(trusted_dst)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f'Extraction of {finfo.name} is blocked (illegal path)')
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}')
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}')
            else:
                yield finfo
    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x1F\x8B']
    @staticmethod
    def extract(input_path, output_path) -> None:
        with gzip.open(input_path, 'rb') as gzip_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b'PK\x03\x04',
        b'PK\x05\x06',  # empty archive
        b'PK\x07\x08',  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
            with open(path, 'rb') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, 'r') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def extract(input_path, output_path) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00']  # RAR_ID # RAR5_ID
    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile')
        import rarfile
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x28\xb5\x2F\xFD']
    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard')
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x42\x5A\x68']
    @staticmethod
    def extract(input_path, output_path) -> None:
        with bz2.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr')
        import py7zr
        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, 'r') as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x04\x22\x4D\x18']
    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4')
        import lz4.frame
        with lz4.frame.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (because of tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path, return_extractor=False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.", category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated", ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('.lock'))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.", category=FutureWarning, )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.", category=FutureWarning, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
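# Added usage sketch (assumed, based on the classes above): infer the archive
# format first, then extract with an explicit `extractor_format`. The paths
# are illustrative.
# archive_format = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
# Extractor.extract("archive.tar.gz", "extracted_dir", extractor_format=archive_format)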
| 100 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 169 |
def solution(n: int = 100) -> int:
    """simple docstring"""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
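# Added worked example (a sketch, not in the original file): for n = 5 the
# distinct values of a**b with 2 <= a, b <= 5 number 15, since 16 = 2**4 = 4**2
# is counted only once.
assert solution(5) == 15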
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 169 | 1 |
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 353 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values):
        """simple docstring"""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    # The source only shows three unnamed False-valued class attributes; the
    # names below are assumed from the usual Flax model-test layout.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 83 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ), )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
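# Added usage sketch (not part of the original module): `shape_list` returns
# static dimensions where known and symbolic ones otherwise, and
# `stable_softmax` matches tf.nn.softmax up to the tiny epsilon shift.
# x = tf.zeros((2, 3))
# assert shape_list(x) == [2, 3]
# probs = stable_softmax(tf.ones((2, 3)), axis=-1)  # each row is ~[1/3, 1/3, 1/3]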
| 162 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """simple docstring"""
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 118 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        # `_A` on the next line holds the expected encoding, kept verbatim from the source.
        # fmt: off
_A = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        expected_encoding = _A
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, )
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'levit'
    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
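# Added usage sketch (defaults taken from the signature above):
# config = LevitConfig()
# config.hidden_sizes      # [128, 256, 384]
# config.down_ops[0][0]    # "Subsample"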
| 174 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 193 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__lowerCamelCase ):
A__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__lowerCamelCase ):
A__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
A__ = FlaxBertModel.from_pretrained(__lowerCamelCase )
A__ = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCamelCase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
@slow
def UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
A__ = FlaxRobertaModel.from_pretrained(__lowerCamelCase )
A__ = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCamelCase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,'''bert-base is not a local folder and is not a valid model identifier''' ):
A__ = FlaxAutoModel.from_pretrained('''bert-base''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase,revision='''aaaaaa''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,'''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''',):
A__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(__lowerCamelCase,'''Use `from_pt=True` to load this model''' ):
A__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 193 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
SPIECE_UNDERLINE = '▁'
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Tuple = ['''input_ids''', '''token_type_ids''']
UpperCamelCase__ : List[str] = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it and is matched in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False)
            if isinstance(mask_token , str)
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _lowerCamelCase ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def _lowerCamelCase ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def _lowerCamelCase ( self , save_directory : str , filename_prefix : Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
return (out_vocab_file,)
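# A small illustration (hypothetical ids, not FNet's real vocabulary) of the
# special-token layout built by the two methods above:
# one sequence  -> [CLS] A [SEP]          with token_type_ids all 0
# two sequences -> [CLS] A [SEP] B [SEP]  with 0s for the first segment, 1s for the second
_cls, _sep = [101], [102]
_a, _b = [7, 8, 9], [4, 5]
assert _cls + _a + _sep == [101, 7, 8, 9, 102]
assert len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1] == [0, 0, 0, 0, 0, 1, 1, 1]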
| 353 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case :Tuple = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _A ( ChunkPipeline ):
def __init__( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
requires_backends(self , '''vision''')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, "Image.Image", List[Dict[str, Any]]] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
if "text_queries" in kwargs:
__a = kwargs.pop('''text_queries''')
if isinstance(__SCREAMING_SNAKE_CASE , (str, Image.Image)):
__a = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__a = image
__a = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return results
def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = {}
if "threshold" in kwargs:
__a = kwargs['''threshold''']
if "top_k" in kwargs:
__a = kwargs['''top_k''']
return {}, {}, postprocess_params
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = load_image(inputs['''image'''])
__a = inputs['''candidate_labels''']
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = candidate_labels.split(''',''')
        __a = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
for i, candidate_label in enumerate(__SCREAMING_SNAKE_CASE):
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
yield {
"is_last": i == len(__SCREAMING_SNAKE_CASE) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = model_inputs.pop('''target_size''')
__a = model_inputs.pop('''candidate_label''')
__a = model_inputs.pop('''is_last''')
__a = self.model(**__SCREAMING_SNAKE_CASE)
__a = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
__a = []
for model_output in model_outputs:
__a = model_output['''candidate_label''']
__a = BaseModelOutput(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.post_process_object_detection(
outputs=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE , target_sizes=model_output['''target_size'''])[0]
for index in outputs["scores"].nonzero():
__a = outputs['''scores'''][index].item()
__a = self._get_bounding_box(outputs['''boxes'''][index][0])
__a = {'''score''': score, '''label''': label, '''box''': box}
results.append(__SCREAMING_SNAKE_CASE)
__a = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE: x["score"] , reverse=__SCREAMING_SNAKE_CASE)
if top_k:
__a = results[:top_k]
return results
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "torch.Tensor"):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
__a , __a , __a , __a = box.int().tolist()
__a = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
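# Usage sketch (assumed checkpoint; any zero-shot object-detection model such
# as "google/owlvit-base-patch32" should work with this pipeline class):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#          candidate_labels=["cat", "remote control"])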
| 131 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
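# A minimal sketch of the same `ipex.optimize` recipe on a plain module;
# `net` is a toy example, not part of the script above. The call returns an
# optimized module that should be run under a bf16 autocast context.
net = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).eval()
net = ipex.optimize(net, dtype=torch.bfloat16, inplace=True)
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    _ = net(torch.randn(1, 8))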
| 283 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
__A : List[Any] = BioGptTokenizer
__A : Optional[int] = False
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase : str = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__A ) )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Dict = "lower newer"
lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase : Optional[int] = "lower"
lowerCamelCase : Any = ["low", "er</w>"]
lowerCamelCase : List[str] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Union[str, Any] = tokens + ["<unk>"]
lowerCamelCase : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A )
lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
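        # Why "lower" tokenizes to ["low", "er</w>"] above: BPE greedily applies
        # the learned merges ("l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>")
        # until no merge rule fires, so "l o w e r</w>" becomes "low er</w>".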
| 283 | 1 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase = ConsistencyModelPipeline
UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
UpperCamelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
        _UpperCAmelCase = UNet2DModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
        _UpperCAmelCase = UNet2DModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _lowerCamelCase ( self : Any , A : Tuple=False) -> int:
"""simple docstring"""
if class_cond:
_UpperCAmelCase = self.dummy_cond_unet
else:
_UpperCAmelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
_UpperCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : str , A : Optional[Any] , A : List[Any]=0) -> int:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = ConsistencyModelPipeline(**A)
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_dummy_inputs(A)
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components(class_cond=A)
_UpperCAmelCase = ConsistencyModelPipeline(**A)
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_dummy_inputs(A)
_UpperCAmelCase = 0
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = ConsistencyModelPipeline(**A)
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_dummy_inputs(A)
_UpperCAmelCase = 1
_UpperCAmelCase = None
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components(class_cond=A)
_UpperCAmelCase = ConsistencyModelPipeline(**A)
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_dummy_inputs(A)
_UpperCAmelCase = 1
_UpperCAmelCase = None
_UpperCAmelCase = 0
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowerCamelCase ( self : Tuple , A : Any=0 , A : int=False , A : Optional[int]="cpu" , A : Optional[Any]=torch.float16 , A : List[str]=(1, 3, 64, 64)) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.manual_seed(A)
_UpperCAmelCase = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
_UpperCAmelCase = self.get_fixed_latents(seed=A , device=A , dtype=A , shape=A)
_UpperCAmelCase = latents
return inputs
    def _lowerCamelCase ( self : Optional[int] , A : int=0 , A : Optional[Any]="cpu" , A : List[Any]=torch.float16 , A : List[Any]=(1, 3, 64, 64)) -> Dict:
"""simple docstring"""
if type(A) == str:
_UpperCAmelCase = torch.device(A)
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = randn_tensor(A , generator=A , device=A , dtype=A)
return latents
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
        _UpperCAmelCase = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2')
_UpperCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
_UpperCAmelCase = ConsistencyModelPipeline(unet=A , scheduler=A)
pipe.to(torch_device=A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
        _UpperCAmelCase = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2')
_UpperCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
_UpperCAmelCase = ConsistencyModelPipeline(unet=A , scheduler=A)
pipe.to(torch_device=A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = 1
_UpperCAmelCase = None
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    @require_torch_2
def _lowerCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
        _UpperCAmelCase = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2')
_UpperCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
_UpperCAmelCase = ConsistencyModelPipeline(unet=A , scheduler=A)
        pipe.to(torch_device=A , torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_inputs(get_fixed_latents=A , device=A)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=A , enable_math=A , enable_mem_efficient=A):
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    @require_torch_2
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
        _UpperCAmelCase = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2')
_UpperCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
_UpperCAmelCase = ConsistencyModelPipeline(unet=A , scheduler=A)
        pipe.to(torch_device=A , torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = self.get_inputs(get_fixed_latents=A , device=A)
_UpperCAmelCase = 1
_UpperCAmelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=A , enable_math=A , enable_mem_efficient=A):
_UpperCAmelCase = pipe(**A).images
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
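        # Note on the two fp16 tests above: `sdp_kernel(...)` is the torch 2.0
        # context manager that restricts which scaled-dot-product-attention
        # backends may run (flash / math / mem-efficient), which is why those
        # tests are gated behind `require_torch_2`.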
| 290 |
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    '''simple docstring'''
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
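    # Extra check with a hypothetical disconnected graph (not part of the demo
    # graph above): BFS reports no path as [] and the distance as -1.
    _disconnected = {"X": [], "Y": []}
    assert bfs_shortest_path(_disconnected, "X", "Y") == []
    assert bfs_shortest_path_distance(_disconnected, "X", "Y") == -1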
| 290 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
def __init__( self , lowercase , lowercase=14 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = parent
a__: List[Any] = batch_size
a__: Optional[int] = seq_length
a__: List[Any] = is_training
a__: List[str] = use_token_type_ids
a__: Tuple = use_input_mask
a__: Union[str, Any] = use_labels
a__: Tuple = use_mc_token_ids
a__: Any = vocab_size
a__: Dict = hidden_size
a__: Optional[int] = num_hidden_layers
a__: List[Any] = num_attention_heads
a__: str = intermediate_size
a__: Tuple = hidden_act
a__: List[Any] = hidden_dropout_prob
a__: Optional[int] = attention_probs_dropout_prob
a__: List[str] = max_position_embeddings
a__: Tuple = type_vocab_size
a__: int = type_sequence_label_size
a__: Union[str, Any] = initializer_range
a__: Any = num_labels
a__: Optional[Any] = num_choices
a__: Dict = scope
a__: Any = self.vocab_size - 1
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Tuple = None
if self.use_input_mask:
a__: Any = random_attention_mask([self.batch_size, self.seq_length])
a__: List[Any] = None
if self.use_token_type_ids:
a__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: Dict = None
if self.use_mc_token_ids:
a__: int = ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
a__: Optional[Any] = None
a__: Tuple = None
a__: Tuple = None
if self.use_labels:
a__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__: List[str] = ids_tensor([self.batch_size] , self.num_choices)
a__: Optional[int] = self.get_config()
a__: Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase) -> List[str]:
'''simple docstring'''
a__: List[str] = CTRLModel(config=lowercase)
model.to(lowercase)
model.eval()
model(lowercase , token_type_ids=lowercase , head_mask=lowercase)
model(lowercase , token_type_ids=lowercase)
a__: str = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase) -> int:
'''simple docstring'''
a__: Optional[Any] = CTRLLMHeadModel(lowercase)
model.to(lowercase)
model.eval()
a__: List[str] = model(lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
): List[Any] = config_and_inputs
a__: Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , *lowercase) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.num_labels
a__: Optional[int] = CTRLForSequenceClassification(lowercase)
model.to(lowercase)
model.eval()
a__: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: Optional[Any] = model(lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
@require_torch
class __snake_case ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = CTRLModelTester(self)
a__: Optional[Any] = ConfigTester(self , config_class=lowercase , n_embd=37)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowercase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: int = CTRLModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
@unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
pass
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = CTRLLMHeadModel.from_pretrained('ctrl')
model.to(lowercase)
a__: Optional[int] = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=lowercase) # Legal the president is
a__: List[Any] = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__: List[str] = model.generate(lowercase , do_sample=lowercase)
self.assertListEqual(output_ids[0].tolist() , lowercase)
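        # The integration test above relies on CTRL's control codes: token
        # 11859 ("Legal") is prepended to steer generation, and the expected
        # ids decode to "Legal the president is a good guy and I don't want to
        # lose my job. \n \n I have a".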
| 290 | """simple docstring"""
class Node :
def __init__( self , lowercase , lowercase=None , lowercase=None) -> List[str]:
'''simple docstring'''
a__: Dict = data
a__: List[Any] = previous
a__: Any = next_node
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.data
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return self.next
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.previous
class LinkedListIterator :
def __init__( self , lowercase) -> Dict:
'''simple docstring'''
a__: List[Any] = head
def __iter__( self) -> List[Any]:
'''simple docstring'''
return self
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
a__: Dict = self.current.get_data()
a__: Optional[Any] = self.current.get_next()
return value
class LinkedList :
def __init__( self) -> Dict:
'''simple docstring'''
a__: List[Any] = None # First node in list
a__: Optional[int] = None # Last node in list
def __str__( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = self.head
a__: Optional[Any] = []
while current is not None:
nodes.append(current.get_data())
a__: str = current.get_next()
return " ".join(str(lowercase) for node in nodes)
def __contains__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.head
while current:
if current.get_data() == value:
return True
a__: Dict = current.get_next()
return False
def __iter__( self) -> int:
'''simple docstring'''
return LinkedListIterator(self.head)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
a__: Optional[Any] = node
a__: Optional[Any] = node
else:
self.insert_before_node(self.head , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(lowercase)
else:
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = Node(lowercase)
if self.head is None:
self.set_head(lowercase)
else:
self.set_tail(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Union[str, Any] = node
a__: Optional[Any] = node.previous
if node.get_previous() is None:
a__: Tuple = node_to_insert
else:
a__: int = node_to_insert
a__: Optional[int] = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Optional[int] = node
a__: Tuple = node.next
if node.get_next() is None:
a__: Optional[int] = node_to_insert
else:
a__: Any = node_to_insert
a__: str = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Any = 1
a__: Tuple = Node(lowercase)
a__: Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase , lowercase)
return
current_position += 1
a__: List[Any] = node.next
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> Node:
'''simple docstring'''
a__: Tuple = self.head
while node:
if node.get_data() == item:
return node
a__: List[str] = node.get_next()
raise Exception('Node not found')
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
if (node := self.get_node(lowercase)) is not None:
if node == self.head:
a__: Any = self.head.get_next()
if node == self.tail:
a__: List[Any] = self.tail.get_previous()
self.remove_node_pointers(lowercase)
@staticmethod
def lowerCamelCase_ ( lowercase) -> None:
'''simple docstring'''
if node.get_next():
a__: Any = node.previous
if node.get_previous():
a__: List[str] = node.next
a__: int = None
a__: Union[str, Any] = None
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self.head is None
def __a ( ) ->None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
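# Structure note: this is a doubly linked list with explicit head/tail
# pointers. insert_before_node/insert_after_node rewire the previous/next
# links in O(1), set_head/set_tail delegate to them, so insert_at_position
# only pays for the walk to the target position, and remove_node_pointers
# detaches a node without scanning the list.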
| 290 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark():
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0_0_0_0 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
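    # Quick sanity check on the 3-4-5 right triangle: both implementations
    # agree exactly (the vectors here are chosen for illustration).
    assert euclidean_distance((0, 0), (3, 4)) == euclidean_distance_no_np((0, 0), (3, 4)) == 5.0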
| 315 |
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase : Union[str, Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 315 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class a ( PretrainedConfig ):
__lowerCAmelCase : List[Any] = """informer"""
__lowerCAmelCase : List[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self :Any ,__lowercase :Optional[int] = None ,__lowercase :Optional[int] = None ,__lowercase :str = "student_t" ,__lowercase :str = "nll" ,__lowercase :int = 1 ,__lowercase :List[int] = None ,__lowercase :Optional[Union[str, bool]] = "mean" ,__lowercase :int = 0 ,__lowercase :int = 0 ,__lowercase :int = 0 ,__lowercase :int = 0 ,__lowercase :Optional[List[int]] = None ,__lowercase :Optional[List[int]] = None ,__lowercase :int = 6_4 ,__lowercase :int = 3_2 ,__lowercase :int = 3_2 ,__lowercase :int = 2 ,__lowercase :int = 2 ,__lowercase :int = 2 ,__lowercase :int = 2 ,__lowercase :bool = True ,__lowercase :str = "gelu" ,__lowercase :float = 0.05 ,__lowercase :float = 0.1 ,__lowercase :float = 0.1 ,__lowercase :float = 0.1 ,__lowercase :float = 0.1 ,__lowercase :int = 1_0_0 ,__lowercase :float = 0.02 ,__lowercase :List[Any]=True ,__lowercase :str = "prob" ,__lowercase :int = 5 ,__lowercase :bool = True ,**__lowercase :Optional[Any] ,):
# time series specific configuration
snake_case__ : Optional[Any] = prediction_length
snake_case__ : Tuple = context_length or prediction_length
snake_case__ : Dict = distribution_output
snake_case__ : List[str] = loss
snake_case__ : Dict = input_size
snake_case__ : str = num_time_features
snake_case__ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case__ : List[str] = scaling
snake_case__ : List[str] = num_dynamic_real_features
snake_case__ : List[str] = num_static_real_features
snake_case__ : int = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case__ : Optional[Any] = cardinality
else:
snake_case__ : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case__ : str = embedding_dimension
else:
snake_case__ : Any = [min(5_0 ,(cat + 1) // 2 ) for cat in self.cardinality]
snake_case__ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
snake_case__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case__ : str = d_model
snake_case__ : List[Any] = encoder_attention_heads
snake_case__ : Tuple = decoder_attention_heads
snake_case__ : Union[str, Any] = encoder_ffn_dim
snake_case__ : Union[str, Any] = decoder_ffn_dim
snake_case__ : List[str] = encoder_layers
snake_case__ : List[str] = decoder_layers
snake_case__ : List[Any] = dropout
snake_case__ : Tuple = attention_dropout
snake_case__ : Any = activation_dropout
snake_case__ : List[Any] = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : Dict = activation_function
snake_case__ : Tuple = init_std
snake_case__ : int = use_cache
# Informer
snake_case__ : Optional[Any] = attention_type
snake_case__ : Optional[int] = sampling_factor
snake_case__ : Dict = distil
super().__init__(is_encoder_decoder=__lowercase ,**__lowercase )
@property
def __lowerCamelCase ( self :Optional[int] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
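# Usage sketch (illustrative values, not the class defaults): a minimal
# instantiation only needs the forecasting horizon, e.g.
# from transformers import InformerConfig
# config = InformerConfig(prediction_length=24, context_length=48, input_size=1)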
| 230 |
A__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def _lowerCAmelCase ( year: int , month: int , day: int ) -> str:
    """simple docstring"""
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
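    # Worked example (2000-01-01 was a Saturday): century_anchor =
    # (5 * (20 % 4) + 2) % 7 = 2, dooms_day = (0 + 0 + 0 + 2) % 7 = 2, the
    # leap-year January entry is 4, so (2 + 1 - 4) % 7 = 6 -> "Saturday".
    assert _lowerCAmelCase(2000, 1, 1) == "Saturday"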
| 230 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_snake_case : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
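# Pattern note: at type-checking time the concrete import above gives IDEs and
# static checkers the real BartphoTokenizer symbol; at runtime the module is
# replaced by _LazyModule, so the sentencepiece-dependent import is deferred
# until the attribute is first accessed.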
| 363 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Dict = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
__snake_case : List[Any] = MaskFormerConfig(backbone_config=__lowerCamelCase )
__snake_case : List[Any] = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
__snake_case : Any = 8_4_7
__snake_case : List[Any] = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
__snake_case : Optional[int] = 1_5_0
__snake_case : int = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
__snake_case : Optional[Any] = 1_7_1
__snake_case : List[str] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
__snake_case : Optional[int] = 1_3_3
__snake_case : int = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
__snake_case : Union[str, Any] = 1_9
__snake_case : Dict = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
__snake_case : Any = 6_5
__snake_case : Any = "mapillary-vistas-id2label.json"
__snake_case : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
    __snake_case : Tuple = {int(k ): v for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Dict = dct.pop(__lowerCamelCase )
__snake_case : Any = val
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__snake_case : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__snake_case : Tuple = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__snake_case : Tuple = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Tuple = in_proj_weight[:dim, :]
__snake_case : Tuple = in_proj_bias[: dim]
__snake_case : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
__snake_case : Tuple = in_proj_bias[
dim : dim * 2
]
__snake_case : str = in_proj_weight[
-dim :, :
]
__snake_case : Any = in_proj_bias[-dim :]
# fmt: on
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
# fmt: off
__snake_case : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__snake_case : List[str] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__snake_case : Union[str, Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Any = in_proj_weight[: hidden_size, :]
__snake_case : Optional[int] = in_proj_bias[:config.hidden_size]
__snake_case : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
__snake_case : Any = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case : Tuple = in_proj_weight[-hidden_size :, :]
__snake_case : Optional[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__snake_case : Optional[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__snake_case : Union[str, Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : int = in_proj_weight[: hidden_size, :]
__snake_case : Tuple = in_proj_bias[:config.hidden_size]
__snake_case : str = in_proj_weight[hidden_size : hidden_size * 2, :]
__snake_case : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case : Optional[Any] = in_proj_weight[-hidden_size :, :]
__snake_case : Tuple = in_proj_bias[-hidden_size :]
# fmt: on
def lowerCAmelCase_ ( ):
__snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F'nielsr/{model_name}' )
        image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert.",
    )
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
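# Illustrative usage (the script filename and paths below are assumptions, not
# taken from the original):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path /path/to/dump
#
# Afterwards the converted model's outputs can be turned into a segmentation map
# with `image_processor.post_process_semantic_segmentation(outputs)`.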
| 134 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
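# A minimal sketch of the deferred-import pattern used above (illustrative only;
# it relies on PEP 562 module-level __getattr__ instead of transformers' _LazyModule):
#
# import importlib
#
# _imports = {"configuration_xlm_roberta_xl": ["XLMRobertaXLConfig"]}
#
# def __getattr__(name):
#     for module_name, symbols in _imports.items():
#         if name in symbols:
#             module = importlib.import_module(f".{module_name}", __name__)
#             return getattr(module, name)  # resolved only on first access
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")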
| 182 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path )-> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem( fs )-> bool:
    return fs is not None and fs.protocol != "file"
def rename( fs , src , dst )-> None:
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( )-> None:
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
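# Illustrative usage of the helpers above (values assumed, not in the original):
#
# fs = fsspec.filesystem("file")
# assert is_remote_filesystem(fs) is False
# assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"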
| 321 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 204 | def longest_distance( graph : dict ) -> None:
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
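# For the sample graph above the longest chain of vertices is 0 -> 2 -> 5 -> 6 -> 7,
# so the call prints 5. An equivalent memoised depth-first formulation
# (illustrative alternative, not in the original):
#
# from functools import lru_cache
#
# @lru_cache(maxsize=None)
# def longest_from(vertex: int) -> int:
#     return 1 + max((longest_from(x) for x in graph[vertex]), default=0)
#
# assert max(longest_from(v) for v in graph) == 5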
| 204 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = """data2vec-vision"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-4
| 143 | import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase__ : Union[str, Any] = '''http://www.mocksite.com/file1.txt'''
lowerCAmelCase__ : Optional[Any] = '''"text": ["foo", "foo"]'''
lowerCAmelCase__ : List[str] = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class __snake_case :
    status_code = 200
    headers = {"""Content-Length""": """100"""}
    cookies = {}
    def iter_content( self , **kwargs ):
        '''simple docstring'''
        return [bytes(CONTENT , 'utf-8' )]
def mock_request( *args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
import requests
    monkeypatch.setattr(requests , 'request' , mock_request )
snake_case__ : Any = URL
if issubclass(A__ , A__ ):
snake_case__ : Optional[Any] = url
elif issubclass(A__ , A__ ):
snake_case__ : Dict = [url]
elif issubclass(A__ , A__ ):
snake_case__ : Any = {'train': url}
snake_case__ : Union[str, Any] = 'dummy'
snake_case__ : List[str] = 'downloads'
snake_case__ : int = tmp_path
snake_case__ : Tuple = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
snake_case__ : Any = DownloadManager(dataset_name=A__ , download_config=A__ )
snake_case__ : Any = dl_manager.download(A__ )
snake_case__ : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
snake_case__ : int = [downloaded_paths]
snake_case__ : Any = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
snake_case__ : Union[str, Any] = downloaded_paths.values()
snake_case__ : Any = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
snake_case__ : int = Path(A__ )
snake_case__ : Optional[int] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
snake_case__ : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
snake_case__ : int = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
snake_case__ : int = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
snake_case__ : Tuple = str(A__ )
if issubclass(A__ , A__ ):
snake_case__ : Dict = filename
elif issubclass(A__ , A__ ):
snake_case__ : Any = [filename]
elif issubclass(A__ , A__ ):
snake_case__ : Dict = {'train': filename}
snake_case__ : Union[str, Any] = 'dummy'
snake_case__ : List[Any] = xz_file.parent
snake_case__ : Dict = 'extracted'
snake_case__ : List[Any] = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
snake_case__ : Optional[int] = DownloadManager(dataset_name=A__ , download_config=A__ )
snake_case__ : Optional[Any] = dl_manager.extract(A__ )
snake_case__ : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
snake_case__ : str = [extracted_paths]
snake_case__ : Dict = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
snake_case__ : Any = extracted_paths.values()
snake_case__ : Dict = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
snake_case__ : Optional[int] = Path(A__ )
snake_case__ : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
snake_case__ : Dict = extracted_path.read_text()
snake_case__ : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(A__ , start=1 ):
snake_case__ : Optional[int] = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def UpperCamelCase__ ( A__ , A__ ) -> Optional[Any]:
snake_case__ : Tuple = request.getfixturevalue(A__ )
snake_case__ : Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def UpperCamelCase__ ( A__ , A__ ) -> int:
snake_case__ : List[Any] = request.getfixturevalue(A__ )
snake_case__ : str = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCamelCase__ ( A__ ) -> Union[str, Any]:
snake_case__ : Dict = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(A__ ) , start=1 ):
assert os.path.basename(A__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 143 | 1 |
"""simple docstring"""
def solution(n: int = 1_0_0_0 ):
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'''{solution() = }''')
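# Sanity check (derivable by hand, not in the original): for n = 1000 the only
# Pythagorean triplet with a + b + c = 1000 is (200, 375, 425), since
# 200**2 + 375**2 = 180625 = 425**2, so solution() returns
# 200 * 375 * 425 = 31875000.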
| 85 | """simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
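# Illustrative usage (assumes torch and a CUDA toolchain are available; not in
# the original):
#
# import torch
# if torch.cuda.is_available():
#     MSDA = load_cuda_kernels()  # JIT-compiles the extension on first call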
| 85 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots( a: int , b: int , c: int ) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError('''Coefficient \'a\' must not be zero.''' )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main( ) -> None:
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(f"""The solutions are: {solution_1} and {solution_2}""" )
if __name__ == "__main__":
main()
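# Worked check for the example in main(): 5x^2 + 6x + 1 has discriminant
# 36 - 20 = 16, giving roots (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0,
# so quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0).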
| 23 |
from torch import nn
def get_activation( act_fn ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}")
| 149 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value : int):
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    def __init__( self , tree : Node):
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self , node : Node | None):
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__( self ):
        """simple docstring"""
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
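# Illustrative usage (values assumed, not in the original): summing a
# three-node tree through the iterator defined above.
#
# root = Node(10)
# root.left, root.right = Node(5), Node(-3)
# assert next(iter(BinaryTreeNodeSum(root))) == 12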
| 313 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    '''simple docstring'''
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
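# Quick verification sketch (illustrative, not in the original): the factors
# should multiply back to the input matrix.
#
# table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
# lower, upper = lower_upper_decomposition(table)
# assert np.allclose(lower @ upper, table)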
| 313 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit: int = 1_000_000 , n_limit: int = 10 ):
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"{solution() = }")
| 313 |
def longest_palindromic_substring( input_string: str ) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''''''
    output_string = ''''''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
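# Illustrative check of Manacher's algorithm above (not in the original):
#
# assert longest_palindromic_substring("ababa") == "ababa"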
| 5 | 0 |
from __future__ import annotations
def is_9_pandigital( n: int ) -> bool:
    """simple docstring"""
    s = str(n )
    return len(s ) == 9 and set(s ) == set("123456789" )
def solution( ) -> int | None:
    """simple docstring"""
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(f'{solution() = }')
| 362 |
from math import pow
def backtrack( needed_sum: int , power: int , current_number: int , current_sum: int , solutions_count: int , ) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solution( needed_sum: int , power: int ) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10." )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
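# Worked example (derivable by hand, not in the original): 10 can be written as
# a sum of unique natural numbers raised to the power 2 in exactly one way,
# 1**2 + 3**2, so solution(10, 2) == 1.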
| 273 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config( swin_name ):
    '''simple docstring'''
    config = SwinConfig()
    name_split = swin_name.split('''_''' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 2_18_41
    else:
        num_classes = 10_00
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
return config
def rename_key( name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''', '''classifier''' )
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # the original writes these q/k/v slices back into orig_state_dict under
            # renamed keys; the key targets were lost in this dump
            if "weight" in key:
                _lowerCamelCase : List[Any] = val[:dim, :]
                _lowerCamelCase : Dict = val[dim : dim * 2, :]
                _lowerCamelCase : List[str] = val[-dim:, :]
            else:
                _lowerCamelCase : List[Any] = val[:dim]
                _lowerCamelCase : List[Any] = val[dim : dim * 2]
                _lowerCamelCase : List[Any] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( swin_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''', '''-''' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    timm_outs = timm_model(inputs['''pixel_values'''] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
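# Illustrative end-to-end check after conversion (using an already-published hub
# checkpoint as a stand-in; not in the original script):
#
# from transformers import AutoImageProcessor, SwinForImageClassification
# model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
# processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")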
| 72 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 6_4 , encoder_ffn_dim: int = 3_2 , decoder_ffn_dim: int = 3_2 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 1_0_0 , init_std: float = 0.02 , use_cache=True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def _number_of_features( self ) -> int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
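# Illustrative usage (values assumed, not from the original file):
#
# from transformers import InformerConfig, InformerForPrediction
# config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
# model = InformerForPrediction(config)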
| 224 | 0 |
'''simple docstring'''
def bead_sort( sequence : list ):
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 369 |
'''simple docstring'''
def euclidean_gcd( a : int , b : int ) -> int:
    '''simple docstring'''
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive( a : int , b : int ) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main( ):
'''simple docstring'''
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 92 | 0 |
def binary_or( a , b ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("""1""" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
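# Cross-check against Python's built-in operator (illustrative, not in the
# original): binary_or(25, 32) == "0b111001" == bin(25 | 32)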
| 343 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 30}
        crop_size = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        """simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 343 | 1 |
'''simple docstring'''
__lowercase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602176634E-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_58_18,
}
def energy_conversion(from_type : str , to_type : str , value : float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
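# Worked example (derivable from the table above, not in the original):
# energy_conversion("joule", "kilojoule", 1) == 1 * 1.0 / 1000 == 0.001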
| 352 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , 'has_pre_transformation' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 294 | 0 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
A_ = number_of_bytes // partitions
A_ = []
for i in range(_UpperCamelCase ):
A_ = i * bytes_per_partition + 1
A_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 312 | """simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE__ = {
"allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
snake_case = bs[:]
snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
snake_case = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case = set()
snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case = char
return pairs
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
_lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="replace" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase=False , **lowerCAmelCase , ):
"""simple docstring"""
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
super().__init__(
errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , )
with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
snake_case = json.load(lowerCAmelCase )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = errors # how to handle errors in decoding
snake_case = bytes_to_unicode()
snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase , encoding='utf-8' ) as merges_handle:
snake_case = merges_handle.read().split('\n' )[1:-1]
snake_case = [tuple(merge.split() ) for merge in bpe_merges]
snake_case = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
snake_case = {}
snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.encoder )
def snake_case ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
snake_case = tuple(lowerCAmelCase )
snake_case = get_pairs(lowerCAmelCase )
if not pairs:
return token
while True:
snake_case = min(lowerCAmelCase , key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case ,snake_case = bigram
snake_case = []
snake_case = 0
while i < len(lowerCAmelCase ):
try:
snake_case = word.index(lowerCAmelCase , lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case = j
if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case = tuple(lowerCAmelCase )
snake_case = new_word
if len(lowerCAmelCase ) == 1:
break
else:
snake_case = get_pairs(lowerCAmelCase )
snake_case = ' '.join(lowerCAmelCase )
snake_case = word
return word
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = []
for token in re.findall(self.pat , lowerCAmelCase ):
snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(' ' ) )
return bpe_tokens
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = ''.join(lowerCAmelCase )
snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
snake_case = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' )
snake_case = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
snake_case = token_index
writer.write(' '.join(lowerCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case = [self.cls_token_id]
snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1]
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ):
"""simple docstring"""
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def snake_case ( self , text , is_split_into_words=False , **kwargs ):
        """simple docstring"""
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
return (text, kwargs)
    def snake_case ( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
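# Illustrative sketch (not part of the original file): the `_pad` override above keeps a
# `global_attention_mask` aligned with the padded `input_ids`, filling with -1 because 0
# already means "local attention". A standalone version of that padding rule, with
# hypothetical names:
def pad_global_attention_mask(global_attention_mask, target_length, padding_side="right"):
    difference = target_length - len(global_attention_mask)
    if difference <= 0:
        return global_attention_mask
    if padding_side == "right":
        return global_attention_mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + global_attention_mask
    raise ValueError("Invalid padding side: " + str(padding_side))

# Example: a mask marking global attention on the first token, padded from 3 to 5 tokens.
assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]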
| 150 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores( args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="\t" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 1_00.0 * em / total
    fa = 1_00.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 1_00.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def evaluate_batch_retrieval( args , rag_model , questions ):
    def strip_title(title ):
        if title.startswith("\"" ):
            title = title[1:]
        if title.endswith("\"" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="pt" , padding=True , truncation=True , )["input_ids"].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance ) )
return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="pt" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("Q: {} - A: {}".format(q , a ) )
        return answers
def get_args( ):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=str , help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ) , )
    parser.add_argument(
        "--index_name" , default=None , choices=["exact", "compressed", "legacy"] , type=str , help="RAG model retriever type" , )
    parser.add_argument(
        "--index_path" , default=None , type=str , help="Path to the retrieval index" , )
    parser.add_argument("--n_docs" , default=5 , type=int , help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=str , help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ) , )
    parser.add_argument("--k" , default=1 , type=int , help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set" , default=None , type=str , required=True , help="Path to a file containing evaluation samples" , )
    parser.add_argument(
        "--gold_data_path" , default=None , type=str , required=True , help="Path to a tab-separated file with gold samples" , )
    parser.add_argument(
        "--gold_data_mode" , default="qa" , type=str , choices=["qa", "ans"] , help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ) , )
    parser.add_argument(
        "--predictions_path" , type=str , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
    parser.add_argument(
        "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
    parser.add_argument(
        "--eval_batch_size" , default=8 , type=int , help="Batch size per GPU/CPU for evaluation." , )
    parser.add_argument(
        "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
    parser.add_argument(
        "--num_beams" , default=4 , type=int , help="Number of beams to be used when generating answers" , )
    parser.add_argument("--min_length" , default=1 , type=int , help="Min length of the generated answers" )
    parser.add_argument("--max_length" , default=5_0 , type=int , help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
    parser.add_argument(
        "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def main( args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , checkpoints )
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint ) )
        logger.info(" Batch size = %d" , args.eval_batch_size )
        logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("\n".join(answers ) + "\n" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("\n".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__A =get_args()
main(args)
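# Illustrative sketch (not from the original file): how `metric_max_over_ground_truths`
# is typically used — each prediction is scored against every reference answer and the
# best score is kept. `normalize` below is a simplified stand-in for the real
# normalization inside `utils_rag.exact_match_score`.
def normalize(text):
    return " ".join(text.lower().split())

def exact_match(prediction, ground_truth):
    return float(normalize(prediction) == normalize(ground_truth))

def max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

# Example: the second reference matches after normalization, so EM is 1.0.
assert max_over_ground_truths(exact_match, "The Eiffel Tower", ["eiffel tower", "the eiffel  tower"]) == 1.0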
| 47 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    max_seq_length: Optional[int] = field(
        default=1_28 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        default=None , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    language: str = field(
        default=None , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
    train_language: Optional[str] = field(
        default=None , metadata={'help': 'Train language if it is different from the evaluation language.'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] , examples["hypothesis"] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
    # Get the metric function
    metric = evaluate.load("xnli" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model() # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix="predict" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["predict_samples"] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("predict" , metrics )
        trainer.save_metrics("predict" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , "predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , "w" ) as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
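# Illustrative sketch (not from the original file): the core of the preprocessing above is
# tokenizing (premise, hypothesis) pairs into one encoded sequence. A minimal demonstration;
# the checkpoint name is only an assumption, any NLI-capable model would work:
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
batch = {
    "premise": ["The cat sleeps on the mat."],
    "hypothesis": ["An animal is resting."],
}
encoded = tok(batch["premise"], batch["hypothesis"], padding="max_length", max_length=128, truncation=True)
# Each example becomes one sequence: [CLS] premise [SEP] hypothesis [SEP], padded to max_length.
print(len(encoded["input_ids"][0]))  # 128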
| 47 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __UpperCAmelCase ( unittest.TestCase ):
    def __magic_name__ ( self : Any ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''', return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
    def __magic_name__ ( self : str ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained('''gpt2''' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''', return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__ ( self : List[str] ):
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, '''wb''' ) as f:
                http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''', '''wb''' ) as f:
                http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', f )
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def __magic_name__ ( self : Any ):
# This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __magic_name__ ( cls : Optional[Any] ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __magic_name__ ( cls : Any ):
try:
delete_repo(token=cls._token, repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def __magic_name__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('''test-tokenizer''', use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='''test-tokenizer''', push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def __magic_name__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('''valid_org/test-tokenizer-org''', use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-tokenizer-org''', push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def __magic_name__ ( self : Dict ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizerFast''' )
        tokenizer = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=False, trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' )
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Dict ):
        trie = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def __magic_name__ ( self : List[str] ):
        trie = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def __magic_name__ ( self : Optional[Any] ):
        trie = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ), ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ), ['''BC''', '''A'''] )
def __magic_name__ ( self : str ):
        trie = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __magic_name__ ( self : List[str] ):
        trie = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __magic_name__ ( self : Any ):
        trie = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ), ['''AB''', '''C'''] )
def __magic_name__ ( self : Optional[Any] ):
        trie = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ), ['''ABC''', '''D'''] )
def __magic_name__ ( self : List[str] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text('''ABC''', [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts, ['''AB''', '''C'''] )
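# Illustrative sketch (not from the original file): a minimal character trie with the same
# `add`/`data` shape the tests above rely on — each added word carves a path of nested
# dicts, terminated by an empty-string key.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # marks the end of a complete word

trie = MiniTrie()
trie.add("AB")
trie.add("A")
assert trie.data == {"A": {"B": {"": 1}, "": 1}}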
| 336 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput ( BaseOutput ):
    """simple docstring"""
    latents : torch.FloatTensor
class VQModel ( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__(self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 2_56 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.1_8_2_1_5 , norm_type = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.2_5 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode (self , x , return_dict = True ):
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode (self , h , force_not_quantize = False , return_dict = True ):
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == '''spatial''' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward (self , sample , return_dict = True ):
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
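# Illustrative sketch (not from the original file): the `quantize` step above maps each
# latent vector to its nearest codebook entry. A minimal nearest-neighbour lookup in plain
# PyTorch, with hypothetical shapes:
import torch

def nearest_codebook(latents, codebook):
    # latents: (n, d), codebook: (k, d) -> quantized (n, d) and indices (n,)
    distances = torch.cdist(latents, codebook)  # pairwise Euclidean distances, (n, k)
    indices = distances.argmin(dim=1)
    return codebook[indices], indices

codebook = torch.randn(256, 4)
latents = torch.randn(10, 4)
quantized, idx = nearest_codebook(latents, codebook)
assert quantized.shape == (10, 4) and idx.shape == (10,)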
| 174 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCamelCase ( TestCase ):
    """simple docstring"""
    dataset = None
    config_name = None
    def lowerCamelCase__ ( self ,dataset ,config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset ,cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path ,dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir ,config_name=config_name ,hash=dataset_module.hash ,)
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep ,"""/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url ,cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( tmp_path ):
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""] , IterableDataset )
assert next(iter(ds["""train"""] ) )
| 359 |
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner :
    """simple docstring"""
    def __init__( self ,k ,window_size ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
    def detect( self ,img_path ):
        img = cva.imread(img_path ,0 )
        h , w = img.shape
        corner_list : list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img ,cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset ,h - offset ):
            for x in range(offset ,w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
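# Illustrative sketch (not from the original file): the per-pixel loop above computes the
# Harris response R = det(M) - k * trace(M)^2 from windowed sums of gradient products.
# The same response can be computed for the whole image at once with a box filter; note
# that averaging instead of summing only rescales R by a constant, so corner ranking is
# unchanged (the threshold would need adjusting). `window_size` here is a hypothetical
# parameter mirroring the class above.
import numpy as np
from scipy.ndimage import uniform_filter  # assumed available

def harris_response(img, k=0.04, window_size=3):
    dy, dx = np.gradient(img.astype(float))
    wxx = uniform_filter(dx * dx, window_size)  # windowed <Ix^2>
    wyy = uniform_filter(dy * dy, window_size)  # windowed <Iy^2>
    wxy = uniform_filter(dx * dy, window_size)  # windowed <IxIy>
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2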
| 336 | 0 |
'''simple docstring'''
class Graph :
    '''simple docstring'''
    def __init__( self ) -> Any:
        self.vertex = {}
    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex , to_vertex ) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex , visited ) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
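# Illustrative sketch (not from the original file): the same traversal without recursion,
# using an explicit stack over an adjacency dict like the one built by `add_edge`:
def dfs_iterative(graph, start):
    visited, order, stack = set(), [], [start]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they pop in insertion order
        for neighbour in reversed(graph.get(vertex, [])):
            if neighbour not in visited:
                stack.append(neighbour)
    return order

assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]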
| 198 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__a: Tuple = None
__a: Tuple = logging.get_logger(__name__)
__a: Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__a: Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
__a: Tuple = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast ( a__ ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _lowerCAmelCase( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def _lowerCAmelCase( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    def _lowerCAmelCase( self , token_ids_a , token_ids_b = None ) -> List[int]:
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def _lowerCAmelCase( self , token_ids_a , token_ids_b = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def _lowerCAmelCase( self ) -> List[Any]:
return list(
set(filter(lambda __lowerCAmelCase : bool(re.search(r'''<extra_id_\d+>''' , __lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCAmelCase( self ) -> Tuple:
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
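# Illustrative sketch (not from the original file): how the extra_ids configured above are
# used in T5's span-corruption format — sentinel tokens such as <extra_id_0> mark masked
# spans in the input, and the target enumerates the masked content behind the same
# sentinels. A hedged usage example, assuming the public t5-small checkpoint:
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("t5-small")
ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
# Sentinels are single vocabulary entries at the top of the vocab, not split into pieces.
assert tok.convert_tokens_to_ids("<extra_id_0>") in ids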
| 198 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
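# Illustrative sketch (not from the original file): the `_LazyModule` pattern above defers
# heavy imports until an attribute is first accessed. A minimal equivalent inside a
# package's __init__.py, using PEP 562's module-level __getattr__ (the attribute map is a
# hypothetical subset):
import importlib

_LAZY_ATTRS = {"GPTBigCodeConfig": ".configuration_gpt_bigcode"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")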
| 365 |
'''simple docstring'''
def solution( ) -> int:
    return [
        a * b * (1_000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
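# Illustrative sketch (not from the original file): the brute force above can be reduced
# to a single loop. With a + b + c = 1000 and a^2 + b^2 = c^2, substituting c = 1000 - a - b
# gives b = (1000^2 - 2000*a) / (2000 - 2*a), so b (and hence the whole triple) follows
# directly from a:
def solution_fast() -> int:
    for a in range(1, 999):
        numerator = 1_000_000 - 2_000 * a
        denominator = 2_000 - 2 * a
        if numerator % denominator == 0:
            b = numerator // denominator
            c = 1_000 - a - b
            if 0 < a < b < c:
                return a * b * c
    raise ValueError("no solution found")

assert solution_fast() == 31_875_000  # the triple (200, 375, 425)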
| 160 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend( line ):
    '''simple docstring'''
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ):
    '''simple docstring'''
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : str ):
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(lowercase )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase , lowercase )
else:
return DUMMY_CLASS.format(lowercase , lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int]=None ):
'''simple docstring'''
if backend_specific_objects is None:
lowerCamelCase_ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ = '[' + ', '.join(f"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']'
lowerCamelCase_ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase , lowercase ) for o in objects] )
lowerCamelCase_ = dummy_file
return dummy_files
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int]=False ):
'''simple docstring'''
lowerCamelCase_ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
lowerCamelCase_ = os.path.join(lowercase , 'utils' )
lowerCamelCase_ = {
backend: os.path.join(lowercase , f"""dummy_{short_names.get(lowercase , lowercase )}_objects.py""" )
for backend in dummy_files.keys()
}
lowerCamelCase_ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase ):
with open(lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase_ = f.read()
else:
lowerCamelCase_ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"""Updating diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py as the main """
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f"""diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py. Run `make fix-copies` """
'to fix this.' )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
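# Illustration with a trimmed, hypothetical copy of the DUMMY_CLASS template
# defined above: for a torch-only class, `create_dummy_object` expands the
# template into a placeholder that raises only when the object is actually used.
_trimmed_dummy_class = (
    "class {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)
print(_trimmed_dummy_class.format("UNet2DModel", '["torch"]'))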
| 204 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _A ( __magic_name__ ):
lowercase__ = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(__magic_name__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowercase__ = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
lowercase__ = [[0.0, 0.0], [0.0, 0.0]]
lowercase__ , lowercase__ = matrix[1][1], matrix[0][0]
lowercase__ , lowercase__ = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(__magic_name__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(__magic_name__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowercase__ = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
lowercase__ = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowercase__ = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowercase__ = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowercase__ = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowercase__ = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowercase__ = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowercase__ = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowercase__ = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowercase__ = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowercase__ = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowercase__ = array(__magic_name__ )
for i in range(3 ):
for j in range(3 ):
lowercase__ = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowercase__ = array(__magic_name__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(__magic_name__ )
# Calculate the inverse of the matrix
return [[float(d(__magic_name__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 201 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCAmelCase ( lowercase_ ):
def __init__( self :str , _lowercase :Optional[NestedDataStructureLike[PathLike]] = None , _lowercase :Optional[NamedSplit] = None , _lowercase :Optional[Features] = None , _lowercase :str = None , _lowercase :bool = False , _lowercase :bool = False , _lowercase :Optional[int] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = path_or_paths
lowercase__ = split if split or isinstance(_lowercase , _lowercase ) else "train"
lowercase__ = features
lowercase__ = cache_dir
lowercase__ = keep_in_memory
lowercase__ = streaming
lowercase__ = num_proc
lowercase__ = kwargs
@abstractmethod
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
pass
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[Any] , _lowercase :Optional[Features] = None , _lowercase :str = None , _lowercase :bool = False , _lowercase :bool = False , _lowercase :Optional[int] = None , **_lowercase :Optional[int] , ):
'''simple docstring'''
lowercase__ = features
lowercase__ = cache_dir
lowercase__ = keep_in_memory
lowercase__ = streaming
lowercase__ = num_proc
lowercase__ = kwargs
@abstractmethod
def UpperCAmelCase ( self :int ):
'''simple docstring'''
pass
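# Hypothetical subclass (not part of the library) showing the contract of the
# abstract reader above: store arguments via the base __init__, then implement
# `read` to return a Dataset (or an IterableDataset when streaming). Class and
# attribute names assume the un-obfuscated originals (AbstractDatasetReader,
# self.path_or_paths, self.streaming) and the datasets.io.abc module path.
import json
from datasets import Dataset
from datasets.io.abc import AbstractDatasetReader


class JsonLinesReader(AbstractDatasetReader):
    def read(self):
        if self.streaming:
            raise NotImplementedError("streaming is out of scope for this sketch")
        paths = [self.path_or_paths] if isinstance(self.path_or_paths, str) else list(self.path_or_paths)
        records = []
        for path in paths:
            with open(path, encoding="utf-8") as f:
                records.extend(json.loads(line) for line in f)
        return Dataset.from_list(records)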
| 201 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _A ( UpperCamelCase__):
SCREAMING_SNAKE_CASE : int = '''wavlm'''
def __init__( self , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE="group" , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) , _SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , _SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=320 , _SCREAMING_SNAKE_CASE=800 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=320 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="mean" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1500) , _SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , _SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = feat_extract_norm
SCREAMING_SNAKE_CASE_ : Optional[Any] = feat_extract_activation
SCREAMING_SNAKE_CASE_ : Optional[int] = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = conv_bias
SCREAMING_SNAKE_CASE_ : List[str] = num_buckets
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_bucket_distance
SCREAMING_SNAKE_CASE_ : Any = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ : int = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ : List[str] = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout
SCREAMING_SNAKE_CASE_ : str = attention_dropout
SCREAMING_SNAKE_CASE_ : List[Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : List[str] = feat_proj_dropout
SCREAMING_SNAKE_CASE_ : Dict = final_dropout
SCREAMING_SNAKE_CASE_ : Any = layerdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : str = num_ctc_classes
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ : str = use_weighted_layer_sum
SCREAMING_SNAKE_CASE_ : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ : List[Any] = apply_spec_augment
SCREAMING_SNAKE_CASE_ : Dict = mask_time_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_time_length
SCREAMING_SNAKE_CASE_ : List[str] = mask_time_min_masks
SCREAMING_SNAKE_CASE_ : Tuple = mask_feature_prob
SCREAMING_SNAKE_CASE_ : Any = mask_feature_length
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ : Any = num_codevector_groups
SCREAMING_SNAKE_CASE_ : List[str] = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ : Any = num_negatives
SCREAMING_SNAKE_CASE_ : Tuple = codevector_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = proj_codevector_dim
SCREAMING_SNAKE_CASE_ : List[str] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ : List[str] = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ : int = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE_ : Union[str, Any] = add_adapter
SCREAMING_SNAKE_CASE_ : Optional[Any] = adapter_kernel_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = adapter_stride
SCREAMING_SNAKE_CASE_ : int = num_adapter_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : str = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = xvector_output_dim
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
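# Quick check of the property above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) the convolutional feature extractor downsamples the raw
# waveform by 5 * 2**6 = 320 samples per output frame.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320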
| 253 |
from collections import deque
class _a :
"""simple docstring"""
def __init__( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = process_name # process name
UpperCamelCase__: Optional[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCamelCase__: Tuple = arrival_time
UpperCamelCase__: str = burst_time # remaining burst time
UpperCamelCase__: int = 0 # total time of the process wait in ready queue
UpperCamelCase__: List[Any] = 0 # time from arrival time to completion time
class _a :
"""simple docstring"""
def __init__( self: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: list[int] , __lowerCamelCase: deque[Process] , __lowerCamelCase: int , ):
'''simple docstring'''
UpperCamelCase__: List[str] = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCamelCase__: Optional[Any] = time_slices
# unfinished process is in this ready_queue
UpperCamelCase__: Optional[int] = queue
# current time
UpperCamelCase__: Any = current_time
# finished process is in this sequence queue
UpperCamelCase__: deque[Process] = deque()
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: Dict = []
for i in range(len(__lowerCamelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: int = []
for i in range(len(__lowerCamelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = []
for i in range(len(__lowerCamelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: deque[Process] ):
'''simple docstring'''
UpperCamelCase__: deque[Process] = deque() # sequence deque of finished process
while len(__lowerCamelCase ) != 0:
UpperCamelCase__: int = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__lowerCamelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCamelCase__: Optional[int] = 0
# set the process's turnaround time because it is finished
UpperCamelCase__: Optional[Any] = self.current_time - cp.arrival_time
# set the completion time
UpperCamelCase__: List[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(__lowerCamelCase )
self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: deque[Process] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__lowerCamelCase ) ):
UpperCamelCase__: str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__lowerCamelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCamelCase__: Optional[int] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__lowerCamelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCamelCase__: Optional[int] = 0
# set the finish time
UpperCamelCase__: Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
UpperCamelCase__: Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__lowerCamelCase )
self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
UpperCamelCase__ , UpperCamelCase__: Dict = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A__: Any = Process('''P1''', 0, 53)
A__: Tuple = Process('''P2''', 0, 17)
A__: Tuple = Process('''P3''', 0, 68)
A__: Tuple = Process('''P4''', 0, 24)
A__: Any = 3
A__: str = [17, 25]
A__: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
A__: str = Process('''P1''', 0, 53)
A__: Union[str, Any] = Process('''P2''', 0, 17)
A__: Optional[Any] = Process('''P3''', 0, 68)
A__: str = Process('''P4''', 0, 24)
A__: Any = 3
A__: Optional[Any] = [17, 25]
A__: Any = deque([Pa, Pa, Pa, Pa])
A__: Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A__: str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
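# Worked example of the round-robin step used by the class above, reduced to a
# single queue: burst times 3 and 5 with a time slice of 2 give the timeline
# P1 [0,2), P2 [2,4), P1 [4,5) done, P2 [5,7), P2 [7,8) done, so the completion
# times are {P1: 5, P2: 8} and the waiting times are {P1: 2, P2: 3}.
from collections import deque as _deque

def _round_robin_completion(bursts, time_slice):
    queue = _deque(bursts.items())
    now, completion = 0, {}
    while queue:
        name, remaining = queue.popleft()
        run = min(time_slice, remaining)
        now += run
        if remaining == run:
            completion[name] = now
        else:
            queue.append((name, remaining - run))
    return completion

assert _round_robin_completion({"P1": 3, "P2": 5}, 2) == {"P1": 5, "P2": 8}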
| 149 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase : int = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase):
'''simple docstring'''
_A = UNetaDModel
_A = 'sample'
@property
def _lowerCamelCase ( self :Any ) -> Union[str, Any]:
__UpperCamelCase : Dict = 4
__UpperCamelCase : Dict = 3
__UpperCamelCase : str = (3_2, 3_2)
__UpperCamelCase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
__UpperCamelCase : Union[str, Any] = torch.tensor([1_0] ).to(a )
return {"sample": noise, "timestep": time_step}
@property
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[int]:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self :Any ) -> Dict:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self :List[Any] ) -> Dict:
__UpperCamelCase : Dict = {
"block_out_channels": (3_2, 6_4),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 3_2,
}
__UpperCamelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase):
'''simple docstring'''
_A = UNetaDModel
_A = 'sample'
@property
def _lowerCamelCase ( self :Tuple ) -> List[Any]:
__UpperCamelCase : List[Any] = 4
__UpperCamelCase : Any = 4
__UpperCamelCase : str = (3_2, 3_2)
__UpperCamelCase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
__UpperCamelCase : Optional[Any] = torch.tensor([1_0] ).to(a )
return {"sample": noise, "timestep": time_step}
@property
def _lowerCamelCase ( self :Tuple ) -> str:
return (4, 3_2, 3_2)
@property
def _lowerCamelCase ( self :Optional[int] ) -> Union[str, Any]:
return (4, 3_2, 3_2)
def _lowerCamelCase ( self :Dict ) -> List[str]:
__UpperCamelCase : Optional[Any] = {
"sample_size": 3_2,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (3_2, 6_4),
"attention_head_dim": 3_2,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
__UpperCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self :Any ) -> int:
__UpperCamelCase , __UpperCamelCase : Dict = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=a )
self.assertIsNotNone(a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(a )
__UpperCamelCase : Optional[int] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _lowerCamelCase ( self :int ) -> Tuple:
__UpperCamelCase , __UpperCamelCase : List[Any] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=a )
model.to(a )
__UpperCamelCase : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _lowerCamelCase ( self :int ) -> str:
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
__UpperCamelCase , __UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=a )
model_accelerate.to(a )
model_accelerate.eval()
__UpperCamelCase : Optional[int] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCamelCase : List[Any] = noise.to(a )
__UpperCamelCase : Any = torch.tensor([1_0] * noise.shape[0] ).to(a )
__UpperCamelCase : List[str] = model_accelerate(a , a )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
__UpperCamelCase , __UpperCamelCase : Any = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=a , low_cpu_mem_usage=a )
model_normal_load.to(a )
model_normal_load.eval()
__UpperCamelCase : Optional[int] = model_normal_load(a , a )["sample"]
assert torch_all_close(a , a , rtol=1E-3 )
def _lowerCamelCase ( self :Any ) -> int:
__UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(a )
__UpperCamelCase : Tuple = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCamelCase : List[Any] = noise.to(a )
__UpperCamelCase : Optional[int] = torch.tensor([1_0] * noise.shape[0] ).to(a )
with torch.no_grad():
__UpperCamelCase : Dict = model(a , a ).sample
__UpperCamelCase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__UpperCamelCase : int = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(a , a , rtol=1E-3 ) )
class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase):
'''simple docstring'''
_A = UNetaDModel
_A = 'sample'
@property
def _lowerCamelCase ( self :List[str] , a :Optional[Any]=(3_2, 3_2) ) -> Any:
__UpperCamelCase : Optional[int] = 4
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
__UpperCamelCase : Any = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=a )
return {"sample": noise, "timestep": time_step}
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Dict:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self :Optional[int] ) -> Any:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self :Optional[Any] ) -> int:
__UpperCamelCase : int = {
"block_out_channels": [3_2, 6_4, 6_4, 6_4],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
__UpperCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def _lowerCamelCase ( self :Optional[int] ) -> Any:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=a )
self.assertIsNotNone(a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(a )
__UpperCamelCase : Tuple = self.dummy_input
__UpperCamelCase : List[Any] = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(a )
__UpperCamelCase : List[str] = noise
__UpperCamelCase : Any = model(**a )
assert image is not None, "Make sure output is not None"
@slow
def _lowerCamelCase ( self :List[Any] ) -> Any:
__UpperCamelCase : str = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(a )
__UpperCamelCase : Dict = 4
__UpperCamelCase : Dict = 3
__UpperCamelCase : Union[str, Any] = (2_5_6, 2_5_6)
__UpperCamelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(a )
__UpperCamelCase : Tuple = torch.tensor(batch_size * [1E-4] ).to(a )
with torch.no_grad():
__UpperCamelCase : Dict = model(a , a ).sample
__UpperCamelCase : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__UpperCamelCase : Optional[int] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(a , a , rtol=1E-2 ) )
def _lowerCamelCase ( self :Optional[int] ) -> str:
__UpperCamelCase : List[Any] = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(a )
__UpperCamelCase : str = 4
__UpperCamelCase : List[str] = 3
__UpperCamelCase : Dict = (3_2, 3_2)
__UpperCamelCase : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(a )
__UpperCamelCase : Any = torch.tensor(batch_size * [1E-4] ).to(a )
with torch.no_grad():
__UpperCamelCase : List[Any] = model(a , a ).sample
__UpperCamelCase : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__UpperCamelCase : str = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(a , a , rtol=1E-2 ) )
def _lowerCamelCase ( self :Optional[int] ) -> Union[str, Any]:
# not required for this model
pass
| 151 |
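# Illustration for the UNet2DModel tests above (assumes the public diffusers
# API; the kwargs mirror the first tester's dummy config, and the un-obfuscated
# class name UNet2DModel is assumed):
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    sample_size=32,
)
sample = torch.randn(4, 3, 32, 32)  # (batch, channels, height, width)
timestep = torch.tensor([10])
output = model(sample, timestep).sample
assert output.shape == sample.shape  # this UNet is shape-preserving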
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Tuple = '▁'
lowercase : str = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowercase : Optional[int] = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
lowercase : List[Any] = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
lowercase : Optional[int] = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = ['input_ids', 'attention_mask']
_A = []
_A = []
def __init__( self :Tuple , a :List[str] , a :int , a :Dict=None , a :List[Any]=None , a :List[str]="<s>" , a :str="</s>" , a :Dict="</s>" , a :Optional[Any]="<pad>" , a :Union[str, Any]="<unk>" , a :List[Any]="m2m100" , a :Optional[Dict[str, Any]] = None , a :List[str]=8 , **a :Tuple , ) -> None:
__UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase : List[str] = language_codes
__UpperCamelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(a )
for lang_code in fairseq_language_code
if self.get_lang_token(a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a , tgt_lang=a , bos_token=a , eos_token=a , sep_token=a , unk_token=a , pad_token=a , language_codes=a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a , **a , )
__UpperCamelCase : Optional[Any] = vocab_file
__UpperCamelCase : List[str] = load_json(a )
__UpperCamelCase : Dict = {v: k for k, v in self.encoder.items()}
__UpperCamelCase : int = spm_file
__UpperCamelCase : List[Any] = load_spm(a , self.sp_model_kwargs )
__UpperCamelCase : int = len(self.encoder )
__UpperCamelCase : Tuple = {
self.get_lang_token(a ): self.encoder_size + i for i, lang_code in enumerate(a )
}
__UpperCamelCase : int = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a )}
__UpperCamelCase : Dict = {v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase : int = src_lang if src_lang is not None else "en"
__UpperCamelCase : int = tgt_lang
__UpperCamelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase : Union[str, Any] = num_madeup_words
@property
def _lowerCamelCase ( self :int ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _lowerCamelCase ( self :List[str] ) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self :Any , a :str ) -> None:
__UpperCamelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self :int , a :str ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _lowerCamelCase ( self :List[str] , a :str ) -> str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a , self.encoder[self.unk_token] )
def _lowerCamelCase ( self :List[Any] , a :int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a , self.unk_token )
def _lowerCamelCase ( self :List[str] , a :Optional[Any] ) -> Tuple:
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a ) + token
__UpperCamelCase : List[Any] = []
else:
current_sub_tokens.append(a )
out_string += self.sp_model.decode(a )
return out_string.strip()
def _lowerCamelCase ( self :Optional[int] , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
__UpperCamelCase : Optional[Any] = [1] * len(self.prefix_tokens )
__UpperCamelCase : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def _lowerCamelCase ( self :List[Any] , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self :Dict ) -> Dict:
__UpperCamelCase : int = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :str ) -> Dict:
__UpperCamelCase : Union[str, Any] = self.__dict__.copy()
__UpperCamelCase : int = None
return state
def __setstate__( self :List[Any] , a :Dict ) -> None:
__UpperCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Optional[Any] = {}
__UpperCamelCase : Optional[Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _lowerCamelCase ( self :List[Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
__UpperCamelCase : str = Path(a )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
__UpperCamelCase : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a )
if os.path.abspath(self.spm_file ) != os.path.abspath(a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a )
elif not os.path.isfile(self.spm_file ):
with open(a , "wb" ) as fi:
__UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a )
return (str(a ), str(a ))
def _lowerCamelCase ( self :Dict , a :List[str] , a :str = "en" , a :Optional[List[str]] = None , a :str = "ro" , **a :Union[str, Any] , ) -> BatchEncoding:
__UpperCamelCase : List[str] = src_lang
__UpperCamelCase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(a , a , **a )
def _lowerCamelCase ( self :Union[str, Any] , a :int , a :Optional[str] , a :Optional[str] , **a :List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__UpperCamelCase : int = src_lang
__UpperCamelCase : Tuple = self(a , add_special_tokens=a , **a )
__UpperCamelCase : Optional[int] = self.get_lang_id(a )
__UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowerCamelCase ( self :Any ) -> str:
self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self :Optional[int] ) -> Any:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self :Union[str, Any] , a :str ) -> None:
__UpperCamelCase : str = self.get_lang_token(a )
__UpperCamelCase : Union[str, Any] = self.lang_token_to_id[lang_token]
__UpperCamelCase : Optional[int] = [self.cur_lang_id]
__UpperCamelCase : str = [self.eos_token_id]
def _lowerCamelCase ( self :int , a :str ) -> None:
__UpperCamelCase : Any = self.get_lang_token(a )
__UpperCamelCase : Dict = self.lang_token_to_id[lang_token]
__UpperCamelCase : List[Any] = [self.cur_lang_id]
__UpperCamelCase : Tuple = [self.eos_token_id]
def _lowerCamelCase ( self :Optional[Any] , a :str ) -> str:
return self.lang_code_to_token[lang]
def _lowerCamelCase ( self :Optional[Any] , a :str ) -> int:
__UpperCamelCase : Dict = self.get_lang_token(a )
return self.lang_token_to_id[lang_token]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
__UpperCamelCase : str = sentencepiece.SentencePieceProcessor(**_lowerCamelCase)
spm.Load(str(_lowerCamelCase))
return spm
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> Union[Dict, List]:
'''simple docstring'''
with open(_lowerCamelCase , "r") as f:
return json.load(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : str) -> None:
'''simple docstring'''
with open(_lowerCamelCase , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2)
| 151 | 1 |
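# Usage sketch for the M2M100 tokenizer above (assumes the public transformers
# API and hub access; forcing the decoder's first token selects the target
# language):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))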
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class snake_case :
"""simple docstring"""
_lowerCamelCase = MBartConfig
_lowerCamelCase = {}
_lowerCamelCase = "gelu"
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=20 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=0 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase_ = prepare_mbart_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, inputs_dict
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFMBartModel(config=UpperCamelCase ).get_decoder()
lowerCamelCase_ = inputs_dict["input_ids"]
lowerCamelCase_ = input_ids[:1, :]
lowerCamelCase_ = inputs_dict["attention_mask"][:1, :]
lowerCamelCase_ = inputs_dict["head_mask"]
lowerCamelCase_ = 1
# first forward pass
lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase )
lowerCamelCase_ ,lowerCamelCase_ = outputs.to_tuple()
lowerCamelCase_ = past_key_values[1]
def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any=None , ):
if attention_mask is None:
lowerCamelCase_ = tf.cast(tf.math.not_equal(UpperCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_lowerCamelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFMBartModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = [
" UN Chief Says There Is No Military Solution in Syria",
]
_lowerCamelCase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
_lowerCamelCase = "facebook/mbart-large-en-ro"
@cached_property
def snake_case ( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def snake_case ( self , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.translate_src_text(**UpperCamelCase )
self.assertListEqual(self.expected_text , UpperCamelCase )
def snake_case ( self , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.tokenizer(self.src_text , **UpperCamelCase , return_tensors="tf" )
lowerCamelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
lowerCamelCase_ = self.tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
return generated_words
@slow
def snake_case ( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
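# Minimal check of the mask construction used in `prepare_mbart_inputs_dict`
# above: positions equal to the pad id become 0, everything else becomes 1.
import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[5, 6, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
assert attention_mask.numpy().tolist() == [[1, 1, 0, 0]]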
| 55 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a_ : Any = logging.get_logger(__name__)
a_ : Optional[Any] = {"""vocab_file""": """spiece.model"""}
a_ : Tuple = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="<unk>" , UpperCamelCase="<sep>" , UpperCamelCase="<pad>" , UpperCamelCase="<cls>" , UpperCamelCase="<mask>" , UpperCamelCase=["<eop>", "<eod>"] , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , )
lowerCamelCase_ = 3
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
lowerCamelCase_ = jieba
lowerCamelCase_ = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.sp_model )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if self.remove_space:
lowerCamelCase_ = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ = inputs
lowerCamelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ = unicodedata.normalize("NFKD" , UpperCamelCase )
lowerCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase )] )
if self.do_lower_case:
lowerCamelCase_ = outputs.lower()
return outputs
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.preprocess_text(UpperCamelCase )
lowerCamelCase_ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
lowerCamelCase_ = []
for piece in pieces:
if len(UpperCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ = cur_pieces[1:]
else:
lowerCamelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase )
else:
new_pieces.append(UpperCamelCase )
return new_pieces
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "".join(UpperCamelCase ).replace(UpperCamelCase , " " ).strip()
return out_string
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1, 1]
return ([0] * len(UpperCamelCase )) + [1, 1]
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = super()._decode(*UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
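# Illustration of the translate table built in __init__ above: spaces and
# newlines are mapped to the placeholders "\u2582" and "\u2583" before
# SentencePiece tokenization, and `_decode` maps them back.
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(translator)
assert encoded == "你好\u2582世界\u2583"
assert encoded.replace("\u2582", " ").replace("\u2583", "\n") == "你好 世界\n"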
| 55 | 1 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ ):
a : int = 1
@register_to_config
def __init__(self ,_lowerCamelCase=2000 ,_lowerCamelCase=0.1 ,_lowerCamelCase=20 ,_lowerCamelCase=1E-3 ) -> List[str]:
'''simple docstring'''
__lowercase = None
__lowercase = None
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Dict:
'''simple docstring'''
__lowercase = torch.linspace(1 ,self.config.sampling_eps ,_lowerCamelCase ,device=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ) -> List[Any]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowercase = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase = std.unsqueeze(-1 )
__lowercase = -score / std
# compute
__lowercase = -1.0 / len(self.timesteps )
__lowercase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase = beta_t.unsqueeze(-1 )
__lowercase = -0.5 * beta_t * x
__lowercase = torch.sqrt(_lowerCamelCase )
__lowercase = drift - diffusion**2 * score
__lowercase = x + drift * dt
# add noise
__lowercase = randn_tensor(x.shape ,layout=x.layout ,generator=_lowerCamelCase ,device=x.device ,dtype=x.dtype )
__lowercase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__(self ) -> Optional[int]:
'''simple docstring'''
return self.config.num_train_timesteps
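# Restating the marginal statistics used in the step method above in plain
# Python (beta_min=0.1, beta_max=20 are the config defaults):
# log_mean_coeff(t) = -t**2 * (beta_max - beta_min) / 4 - t * beta_min / 2 and
# std(t) = sqrt(1 - exp(2 * log_mean_coeff(t))).
import math as _math

def marginal_std(t, beta_min=0.1, beta_max=20.0):
    log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
    return _math.sqrt(1.0 - _math.exp(2.0 * log_mean_coeff))

assert marginal_std(0.0) == 0.0          # no noise at t = 0
assert 0.99 < marginal_std(1.0) < 1.0    # almost pure noise at t = 1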
| 371 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 217 | 0 |
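The init file above follows the lazy-import pattern: `_import_structure` maps submodule names to exported symbols, and `_LazyModule` replaces the package's entry in `sys.modules` so a submodule is only imported when one of its attributes is first touched. A stripped-down sketch of the idea (simplified, not the actual `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    # Minimal illustration: defer submodule imports until attribute access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(submodule, attr)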
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 3_0}
        crop_size = crop_size if crop_size is not None else {"height": 3_0, "width": 3_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
"""simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize_and_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "crop_pct" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
"""simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 3_0} )
self.assertEqual(image_processor.crop_size , {"height": 3_0, "width": 3_0} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil( self ):
"""simple docstring"""
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy( self ):
"""simple docstring"""
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch( self ):
"""simple docstring"""
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 159 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.60_21e-19 # units = C
def __a(conductivity : float , electron_conc : float , mobility : float , ):
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 0 |
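The function above rearranges sigma = q * n * mu (conductivity = elementary charge x electron concentration x mobility) around whichever argument was passed as zero. A quick worked check of the arithmetic, with example values:

q = 1.6021e-19  # elementary charge, matching ELECTRON_CHARGE above
electron_conc = 1e19  # carriers per cubic metre (example value)
mobility = 0.01  # m^2 / (V s) (example value)
# Solving for conductivity: sigma = q * n * mu
sigma = q * electron_conc * mobility
assert abs(sigma - 1.6021e-2) < 1e-12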
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig( PretrainedConfig ):
    model_type = 'albert'
    def __init__( self ,vocab_size=30000 ,embedding_size=128 ,hidden_size=4096 ,num_hidden_layers=12 ,num_hidden_groups=1 ,num_attention_heads=64 ,intermediate_size=16384 ,inner_group_num=1 ,hidden_act="gelu_new" ,hidden_dropout_prob=0 ,attention_probs_dropout_prob=0 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,classifier_dropout_prob=0.1 ,position_embedding_type="absolute" ,pad_token_id=0 ,bos_token_id=2 ,eos_token_id=3 ,**kwargs ):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 358 |
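Assuming the usual transformers OnnxConfig constructor signature, the two classes above compose like this; the printed mapping comes straight from the `inputs` property:

# Hypothetical usage sketch for the config classes above.
config = AlbertConfig(vocab_size=30000, embedding_size=128)
onnx_config = AlbertOnnxConfig(config, task="default")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ('token_type_ids', ...)])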
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_data2vec_audio'''] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_text'''] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_vision'''] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['''modeling_tf_data2vec_vision'''] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 107 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict : SplitDict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_has_dataset_name( split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 175 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip( lowerCamelCase : List[Any] ):
return unittest.skip('Test was skipped' )(lowerCamelCase )
def slow( lowerCamelCase : int ):
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase )
def require_cpu( lowerCamelCase : str ):
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase )
def require_cuda( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase )
def require_xpu( lowerCamelCase : Any ):
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase )
def require_mps( lowerCamelCase : Any ):
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase )
def require_huggingface_suite( lowerCamelCase : str ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase )
def require_bnb( lowerCamelCase : List[str] ):
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase )
def require_tpu( lowerCamelCase : str ):
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase )
def require_single_gpu( lowerCamelCase : Tuple ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase )
def require_single_xpu( lowerCamelCase : Tuple ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase )
def require_multi_gpu( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase )
def require_multi_xpu( lowerCamelCase : List[Any] ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase )
def require_safetensors( lowerCamelCase : int ):
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase )
def require_deepspeed( lowerCamelCase : Any ):
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase )
def require_fsdp( lowerCamelCase : Tuple ):
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase )
def require_torch_min_version( test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F"test requires torch version >= {version}" )(test_case )
def require_tensorboard( lowerCamelCase : int ):
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase )
def require_wandb( lowerCamelCase : int ):
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase )
def require_comet_ml( lowerCamelCase : Dict ):
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers( lowerCamelCase : Dict ):
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase )
class TempDirTestCase( unittest.TestCase ):
    clear_on_setup = True
@classmethod
    def setUpClass( cls ):
"""simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
    def tearDownClass( cls ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
    def setUp( self ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase( unittest.TestCase ):
    def add_mocks( self , snake_case : Union[mock.Mock, List[mock.Mock]] ):
"""simple docstring"""
        self.mocks = snake_case if isinstance(snake_case , (tuple, list) ) else [snake_case]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors( tensor ):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class SubprocessCallException(Exception):
    pass
def run_command( command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 175 | 1 |
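A short sketch of how these helpers compose in a downstream test module; the test bodies are placeholders:

# Hypothetical test module built on the helpers above.
class ExampleTest(unittest.TestCase):
    @require_cuda  # skipped unless torch.cuda.is_available()
    def test_gpu_path(self):
        ...

    @slow  # only runs when RUN_SLOW=yes is exported
    def test_subprocess_roundtrip(self):
        result = execute_subprocess_async(["python", "-c", "print('ok')"])
        assert "ok" in "\n".join(result.stdout)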
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray , 3 ).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any()
| 359 |
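For reference, the local binary pattern value exercised by the last test is an 8-bit code built by thresholding a pixel's neighbors against the center. A self-contained numpy sketch of the idea (the repo's `lbp` module may order or weight the neighbors differently):

import numpy as np

def local_binary_value_sketch(image: np.ndarray, x: int, y: int) -> int:
    # Clockwise 8-neighborhood offsets starting at the top-left neighbor.
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    center = image[x][y]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        # Out-of-bounds neighbors contribute a 0 bit.
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1] and image[nx][ny] >= center:
            value += 1 << bit
    return value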
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowerCamelCase__ = KEYMAP["""up"""]
lowerCamelCase__ = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 307 | 0 |
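A minimal interactive loop over `get_character`, assuming a terminal is attached:

if __name__ == "__main__":
    # Read keys until Ctrl-C (KEYMAP["interrupt"]) arrives.
    while True:
        key = get_character()
        if key == chr(KEYMAP["interrupt"]):
            break
        print(repr(key))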