| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 86–54.5k) | int64 (0–371) | string (length 87–49.2k) | int64 (0–349) | int64 (0–1) |
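Each row pairs a `code` sample with a `style_context` sample, plus integer style ids and a binary `label` (presumably indicating whether the two samples share the same code style; the dump below interleaves the row values with the code itself). A minimal sketch of inspecting such a dataset with the `datasets` library — the Hub id `user/code-style-pairs` is a hypothetical placeholder, not the real dataset path:

```python
from datasets import load_dataset

# Hypothetical Hub id - substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["label"])            # 0 or 1
print(row["code_codestyle"])   # integer style id of the code sample
print(row["code"][:200])       # first 200 characters of the code sample
```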
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data sample
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # This test has to be overridden because BLOOM uses ALiBi positional embeddings, which impose
        # no sequence length constraint. The parent-class test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
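Outside the test harness, the round trip asserted above can be reproduced directly; a minimal sketch (assumes network access to the Hugging Face Hub):

```python
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tokenizer.encode("The quick brown fox</s>")
print(ids)                    # [2175, 23714, 73173, 144252, 2] - ends with the </s> id
print(tokenizer.decode(ids))  # "The quick brown fox</s>"
```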
---
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the lowest bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        # Pass the current number into the timed statement instead of hard-coding 25.
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
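A quick check of both counters on a concrete value: 25 is 0b11001, so it has three set bits. These lines assume the two functions above are in scope; on Python 3.10+ the built-in `int.bit_count()` gives the same answer.

```python
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3
assert (25).bit_count() == 3  # built-in equivalent, Python >= 3.10
```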
---
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would
        # always return 0 - set clip_std to 1 so it won't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )
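For reference, the same pipeline class is used against the published checkpoint roughly as follows; a sketch, assuming the `kandinsky-community/kandinsky-2-2-prior` checkpoint and a CUDA device (the output exposes `image_embeds`, the field the test above asserts on):

```python
import torch
from diffusers import KandinskyV22PriorPipeline

pipe = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")

out = pipe("a portrait of a horse", num_inference_steps=25, generator=torch.manual_seed(0))
image_embeds = out.image_embeds  # fed to the Kandinsky 2.2 decoder pipeline afterwards
```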
---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
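With `_LazyModule` installed in `sys.modules`, importing from the package defers the heavy submodule imports until first attribute access; a sketch of the effect, assuming this `__init__.py` lives at `transformers/models/swiftformer/`:

```python
# modeling_swiftformer is NOT imported at interpreter startup; the _LazyModule
# resolves each attribute (and performs the real import) on first access.
from transformers.models.swiftformer import SwiftFormerConfig

config = SwiftFormerConfig()  # by now configuration_swiftformer has been loaded
```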
---
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default="What does Moses' rod turn into ?",
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
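Once the script has run, the passages and the HNSW index can be reloaded and queried. A minimal sketch of the retrieval side, assuming the default output paths above; the DPR question-encoder checkpoint is the usual counterpart of the context encoder, but its id is an assumption here:

```python
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk(passages_path)              # saved in Step 1
dataset.load_faiss_index("embeddings", index_path)   # saved in Step 2

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")

question = "What does Moses' rod turn into ?"
question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy()
scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)
print(retrieved["title"])
```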
---
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
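To make the numbers concrete: near x = 37.5, `young` (triangle 0-25-50) has membership about 0.5 and `middle_aged` (triangle 25-50-75) also about 0.5, so the union is about 0.5, the algebraic sum about 0.5 + 0.5 - 0.25 = 0.75, and the algebraic product about 0.25. A quick check that can be appended inside the script (values are approximate because 37.5 need not fall exactly on the 75-point grid):

```python
i = int(np.argmin(np.abs(X - 37.5)))         # grid point closest to 37.5
print(young[i], middle_aged[i])              # both ~0.5
print(union[i], alg_sum[i], alg_product[i])  # ~0.5, ~0.75, ~0.25
```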
---
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
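The defaults above reproduce the Swin-Tiny shape parameters, and the derived `hidden_size` is what downstream wrappers rely on; a minimal usage sketch:

```python
from transformers import SwinConfig, SwinModel

config = SwinConfig()      # Swin-Tiny-like defaults: embed_dim=96, depths=[2, 2, 6, 2]
print(config.hidden_size)  # 768 == 96 * 2**3, the channel dim after the last stage
model = SwinModel(config)  # randomly initialized model with this architecture
```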
---
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverse the DDIM sampling process to recover the latent noise for given images.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
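`slerp` is standard spherical linear interpolation between two noise tensors, with θ the angle between them:

$$\operatorname{slerp}(x_0, x_1, \alpha) = \frac{\sin\big((1-\alpha)\theta\big)}{\sin\theta}\,x_0 + \frac{\sin(\alpha\theta)}{\sin\theta}\,x_1, \qquad \theta = \arccos\frac{\langle x_0, x_1 \rangle}{\lVert x_0 \rVert\,\lVert x_1 \rVert}$$

At α = 0 it returns x₀, at α = 1 it returns x₁, and intermediate values stay on the arc between them, which preserves the norm statistics of Gaussian noise better than straight linear interpolation would.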
---
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
---
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
---
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work with a requested length beyond the model maximum
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
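The user-facing surface these tests exercise is small; a minimal sketch against the same tiny test checkpoint (output is deterministic because sampling is off by default):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")

print(generator("Hello I believe in", max_new_tokens=5))
print(generator("Hello I believe in", max_new_tokens=5, return_full_text=False))  # completion only
```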
---
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
---
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
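`twin_prime` returns `number + 2` when both `number` and `number + 2` are prime, and -1 otherwise; a few quick checks, assuming the function above is in scope:

```python
assert twin_prime(3) == 5    # (3, 5) is a twin prime pair
assert twin_prime(5) == 7    # (5, 7) is a twin prime pair
assert twin_prime(8) == -1   # 8 is not prime
```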
---
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
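A minimal round-trip sketch; `Salesforce/instructblip-flan-t5-xl` is one published InstructBLIP variant (which ships the Q-Former tokenizer in the `qformer_tokenizer` subfolder), and the image URL is a placeholder:

```python
import requests
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")

image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)  # placeholder URL
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")

# Regular tokenizer outputs plus the Q-Former's own ids and mask:
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']
```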
---
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ ='platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = np.where(input_ids != config.pad_token_id, 1, 0 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : int = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=4 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , __lowerCamelCase=0.02 , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Optional[int] = seq_length
_SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
_SCREAMING_SNAKE_CASE : int = use_labels
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_size
_SCREAMING_SNAKE_CASE : str = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Any = intermediate_size
_SCREAMING_SNAKE_CASE : Any = hidden_act
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Any = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[Any] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : Optional[int] = bos_token_id
_SCREAMING_SNAKE_CASE : int = initializer_range
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_SCREAMING_SNAKE_CASE : str = shift_tokens_right(__lowerCamelCase , 1 , 2 )
_SCREAMING_SNAKE_CASE : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = prepare_blenderbot_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_0
_SCREAMING_SNAKE_CASE : Tuple = model_class_name(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = model.encode(inputs_dict["input_ids"] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
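        # Decode all but the last token with a freshly initialized cache, then feed
        # only the final token with the returned past_key_values; the last-position
        # logits must match a plain full-sequence decode for the cache to be correct.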
_SCREAMING_SNAKE_CASE : List[str] = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_SCREAMING_SNAKE_CASE : List[Any] = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = model.decode(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = 2_0
_SCREAMING_SNAKE_CASE : List[Any] = model_class_name(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model.encode(inputs_dict["input_ids"] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_SCREAMING_SNAKE_CASE : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : Any = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_SCREAMING_SNAKE_CASE : List[str] = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = 9_9
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[0]
_SCREAMING_SNAKE_CASE : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self._get_config_and_data()
_SCREAMING_SNAKE_CASE : Tuple = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = lm_model(input_ids=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_SCREAMING_SNAKE_CASE : int = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE : Any = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE : Tuple = lm_model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : str = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(__lowerCamelCase , 1 , 2 )
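        # the shift prepends decoder_start_token_id (2) and drops each row's final
        # token; with this fixture that removes exactly one pad token (id 1)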
        _SCREAMING_SNAKE_CASE : Union[str, Any] = np.equal(input_ids , 1 ).astype(np.floataa ).sum()
        _SCREAMING_SNAKE_CASE : Optional[int] = np.equal(shifted , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase__( __lowercase , unittest.TestCase , __lowercase ):
'''simple docstring'''
__snake_case = True
__snake_case = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__snake_case = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : str = FlaxBlenderbotModelTester(self )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
@jax.jit
def encode_jitted(__lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )
with self.subTest("JIT Enabled" ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = encode_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Optional[int] = encode_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_SCREAMING_SNAKE_CASE : Tuple = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return model.decode(
decoder_input_ids=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , encoder_outputs=__lowerCamelCase , )
with self.subTest("JIT Enabled" ):
_SCREAMING_SNAKE_CASE : List[str] = decode_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : List[str] = decode_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Tuple = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
            # the models expect an eos token at the end of input_ids
_SCREAMING_SNAKE_CASE : int = np.ones((1, 1) ) * model.config.eos_token_id
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"num_beams": 1, "early_stopping": True, "min_length": 1_5, "max_length": 2_5}
_SCREAMING_SNAKE_CASE : List[Any] = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
_SCREAMING_SNAKE_CASE : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["Sam"]
_SCREAMING_SNAKE_CASE : Dict = tokenizer(__lowerCamelCase , return_tensors="jax" )
_SCREAMING_SNAKE_CASE : List[Any] = model.generate(**__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = "Sam is a great name. It means \"sun\" in Gaelic."
_SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(__lowerCamelCase , **__lowerCamelCase )
assert generated_txt[0].strip() == tgt_text
from maths.prime_check import is_prime
def lowerCamelCase__ (__lowerCamelCase ):
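    """
    Twin-prime helper: return ``number + 2`` when both ``number`` and
    ``number + 2`` are prime, and -1 otherwise (e.g. 5 -> 7, but 4 -> -1).
    Docstring added for clarity; the surrounding identifiers follow the
    obfuscated naming used throughout this corpus.
    """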
    if not isinstance(number, int ):
_SCREAMING_SNAKE_CASE : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
_SCREAMING_SNAKE_CASE : List[str] = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(__lowerCamelCase )
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : int = load_dataset("nielsr/rvlcdip-demo" )
_SCREAMING_SNAKE_CASE : Optional[Any] = dataset["train"][0]["image"].convert("RGB" )
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = outputs.logits
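        # RVL-CDIP is a 16-class document-image dataset, hence logits of shape (1, 16)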
_SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=__lowerCamelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ (__lowerCamelCase ):
return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=__lowerCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=__lowerCamelCase )
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = model
_SCREAMING_SNAKE_CASE : Optional[int] = cache
_SCREAMING_SNAKE_CASE : str = force
_SCREAMING_SNAKE_CASE : str = trust_remote_code
def UpperCamelCase_ ( self ) -> Optional[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ ={
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =['MaskFormerFeatureExtractor']
UpperCamelCase__ =['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
UpperCamelCase__ =[
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure)
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and the attention mask
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCamelCase__ ='\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
UpperCamelCase__ ='\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
UpperCamelCase__ ='\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=False ) -> int:
if rouge_types is None:
_SCREAMING_SNAKE_CASE : Tuple = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
_SCREAMING_SNAKE_CASE : List[str] = rouge_scorer.RougeScorer(rouge_types=__lowerCamelCase , use_stemmer=__lowerCamelCase )
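        # BootstrapAggregator pools the per-example scores by bootstrap resampling,
        # yielding low/mid/high confidence intervals per rouge type; without it the
        # raw per-example Score tuples are returned instead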
if use_aggregator:
_SCREAMING_SNAKE_CASE : int = scoring.BootstrapAggregator()
else:
_SCREAMING_SNAKE_CASE : Dict = []
for ref, pred in zip(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = scorer.score(__lowerCamelCase , __lowerCamelCase )
if use_aggregator:
aggregator.add_scores(__lowerCamelCase )
else:
scores.append(__lowerCamelCase )
if use_aggregator:
_SCREAMING_SNAKE_CASE : Optional[Any] = aggregator.aggregate()
else:
_SCREAMING_SNAKE_CASE : str = {}
for key in scores[0]:
_SCREAMING_SNAKE_CASE : Optional[int] = [score[key] for score in scores]
return result
from math import isqrt, loga
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2, max_number, i ):
                _SCREAMING_SNAKE_CASE : Optional[Any] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def lowerCamelCase__ (__lowerCamelCase = 800800, __lowerCamelCase = 800800 ):
_SCREAMING_SNAKE_CASE : Optional[int] = degree * loga(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = int(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = calculate_prime_numbers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Dict = len(__lowerCamelCase ) - 1
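    # Two-pointer sweep over the sorted primes: for the smallest factor p at `left`,
    # shrink `right` until p**q * q**p fits under the bound (compared in log2 space);
    # every prime strictly between the pointers then pairs with p as a hybrid integer.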
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
import logging
import os
from .state import PartialState
class lowerCAmelCase__( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) -> Tuple:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
_SCREAMING_SNAKE_CASE : int = kwargs.pop("main_process_only" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
_SCREAMING_SNAKE_CASE : Tuple = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = None ):
if log_level is None:
_SCREAMING_SNAKE_CASE : Tuple = os.environ.get("ACCELERATE_LOG_LEVEL", __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__lowerCamelCase, {} )
from math import factorial
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
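    """
    Compute the binomial coefficient C(n, k) = n! / (k! * (n - k)!), the number
    of ways to choose k items from n without regard to order; e.g.
    C(52, 5) = 2,598,960 possible five-card hands. (Docstring added for
    clarity; the ``__main__`` block below refers to this function as
    ``combinations``.)
    """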
    # If either condition is true, the function is being asked to
    # calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(__lowerCamelCase ) // (factorial(__lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
__snake_case = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
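        # Outer product of the query and key padding masks: [b, q] x [b, k] ->
        # [b, q, k]; the final unsqueeze adds a broadcastable heads axis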
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
        _SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
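        # tanh approximation of the Gaussian Error Linear Unit:
        # gelu(x) ~ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))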
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
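        # FiLM (feature-wise linear modulation): project the conditioning embedding
        # to a per-channel (scale, shift) pair and apply x * (1 + scale) + shift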
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=0.999, __lowerCamelCase="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_SCREAMING_SNAKE_CASE : Tuple = []
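    # derive betas from the cumulative-product schedule:
    # beta_t = 1 - alpha_bar(t2) / alpha_bar(t1), clipped at max_beta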
for i in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = i / num_diffusion_timesteps
_SCREAMING_SNAKE_CASE : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCamelCase ) / alpha_bar_fn(__lowerCamelCase ), __lowerCamelCase ) )
return torch.tensor(__lowerCamelCase, dtype=torch.floataa )
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
__snake_case = [e.name for e in KarrasDiffusionSchedulers]
__snake_case = 2
@register_to_config
def __init__( self , __lowerCamelCase = 1_0_0_0 , __lowerCamelCase = 0.0_0085 , __lowerCamelCase = 0.012 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "epsilon" , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = 1.0 , __lowerCamelCase = "linspace" , __lowerCamelCase = 0 , ) -> Optional[int]:
if trained_betas is not None:
_SCREAMING_SNAKE_CASE : int = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_SCREAMING_SNAKE_CASE : Tuple = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_SCREAMING_SNAKE_CASE : Optional[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_SCREAMING_SNAKE_CASE : Optional[int] = betas_for_alpha_bar(__lowerCamelCase , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
_SCREAMING_SNAKE_CASE : Tuple = betas_for_alpha_bar(__lowerCamelCase , alpha_transform_type="exp" )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_SCREAMING_SNAKE_CASE : Tuple = 1.0 - self.betas
_SCREAMING_SNAKE_CASE : Optional[int] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_karras_sigmas
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[int]:
if schedule_timesteps is None:
_SCREAMING_SNAKE_CASE : Tuple = self.timesteps
_SCREAMING_SNAKE_CASE : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_SCREAMING_SNAKE_CASE : List[Any] = 1 if len(__lowerCamelCase ) > 1 else 0
else:
_SCREAMING_SNAKE_CASE : str = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
_SCREAMING_SNAKE_CASE : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase_ ( self ) -> str:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , ) -> torch.FloatTensor:
_SCREAMING_SNAKE_CASE : Tuple = self.index_for_timestep(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.sigmas[step_index]
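        # Karras-style input scaling: dividing by sqrt(sigma**2 + 1) keeps the
        # model input at roughly unit variance across noise levels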
_SCREAMING_SNAKE_CASE : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , ) -> int:
_SCREAMING_SNAKE_CASE : int = num_inference_steps
_SCREAMING_SNAKE_CASE : Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_SCREAMING_SNAKE_CASE : Any = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_SCREAMING_SNAKE_CASE : Tuple = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_SCREAMING_SNAKE_CASE : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_SCREAMING_SNAKE_CASE : List[str] = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.log(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase )
if self.config.use_karras_sigmas:
_SCREAMING_SNAKE_CASE : Optional[Any] = self._convert_to_karras(in_sigmas=__lowerCamelCase , num_inference_steps=self.num_inference_steps )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.array([self._sigma_to_t(__lowerCamelCase , __lowerCamelCase ) for sigma in sigmas] )
_SCREAMING_SNAKE_CASE : str = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_SCREAMING_SNAKE_CASE : str = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__lowerCamelCase ).startswith("mps" ):
# mps does not support float64
_SCREAMING_SNAKE_CASE : str = timesteps.to(__lowerCamelCase , dtype=torch.floataa )
else:
_SCREAMING_SNAKE_CASE : List[Any] = timesteps.to(device=__lowerCamelCase )
# empty dt and derivative
_SCREAMING_SNAKE_CASE : Tuple = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_SCREAMING_SNAKE_CASE : List[Any] = defaultdict(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# get log sigma
_SCREAMING_SNAKE_CASE : Dict = np.log(__lowerCamelCase )
# get distribution
_SCREAMING_SNAKE_CASE : Dict = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_SCREAMING_SNAKE_CASE : int = low_idx + 1
_SCREAMING_SNAKE_CASE : List[Any] = log_sigmas[low_idx]
_SCREAMING_SNAKE_CASE : List[Any] = log_sigmas[high_idx]
# interpolate sigmas
_SCREAMING_SNAKE_CASE : int = (low - log_sigma) / (low - high)
_SCREAMING_SNAKE_CASE : Any = np.clip(__lowerCamelCase , 0 , 1 )
# transform interpolation to time range
_SCREAMING_SNAKE_CASE : int = (1 - w) * low_idx + w * high_idx
_SCREAMING_SNAKE_CASE : Optional[Any] = t.reshape(sigma.shape )
return t
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> torch.FloatTensor:
_SCREAMING_SNAKE_CASE : float = in_sigmas[-1].item()
_SCREAMING_SNAKE_CASE : float = in_sigmas[0].item()
_SCREAMING_SNAKE_CASE : Optional[Any] = 7.0 # 7.0 is the value used in the paper
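        # Karras et al. (2022) schedule: interpolate linearly in sigma**(1/rho) space,
        # sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho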
_SCREAMING_SNAKE_CASE : List[str] = np.linspace(0 , 1 , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = sigma_min ** (1 / rho)
_SCREAMING_SNAKE_CASE : Optional[int] = sigma_max ** (1 / rho)
_SCREAMING_SNAKE_CASE : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase_ ( self ) -> str:
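        # Heun's method alternates a first-order (Euler) step with a second-order
        # correction; `dt` is cleared after each correction, so `dt is None` marks
        # the start of a new step pair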
return self.dt is None
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]:
_SCREAMING_SNAKE_CASE : str = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
_SCREAMING_SNAKE_CASE : Tuple = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.sigmas[step_index]
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_SCREAMING_SNAKE_CASE : Optional[Any] = self.sigmas[step_index - 1]
_SCREAMING_SNAKE_CASE : Dict = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_SCREAMING_SNAKE_CASE : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next
_SCREAMING_SNAKE_CASE : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_SCREAMING_SNAKE_CASE : Tuple = sigma_hat if self.state_in_first_order else sigma_next
_SCREAMING_SNAKE_CASE : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `v_prediction`, or `sample`""" )
if self.config.clip_sample:
_SCREAMING_SNAKE_CASE : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_SCREAMING_SNAKE_CASE : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_SCREAMING_SNAKE_CASE : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
_SCREAMING_SNAKE_CASE : Tuple = derivative
_SCREAMING_SNAKE_CASE : Optional[Any] = dt
_SCREAMING_SNAKE_CASE : Any = sample
else:
# 2. 2nd order / Heun's method
_SCREAMING_SNAKE_CASE : Optional[Any] = (sample - pred_original_sample) / sigma_next
_SCREAMING_SNAKE_CASE : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_SCREAMING_SNAKE_CASE : Any = self.dt
_SCREAMING_SNAKE_CASE : int = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : Optional[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : List[str] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_SCREAMING_SNAKE_CASE : List[str] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
_SCREAMING_SNAKE_CASE : Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_SCREAMING_SNAKE_CASE : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_SCREAMING_SNAKE_CASE : Dict = self.timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE : List[str] = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps]
_SCREAMING_SNAKE_CASE : Dict = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_SCREAMING_SNAKE_CASE : List[Any] = sigma.unsqueeze(-1 )
_SCREAMING_SNAKE_CASE : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Dict:
return self.config.num_train_timesteps
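# --- Illustration (added, not part of the original file): a minimal NumPy
# sketch of the two-stage Heun update performed by `step` above. The
# derivative matches the scheduler's (sample - pred_original_sample) / sigma,
# and the second stage averages the derivatives at sigma and sigma_next.
# `toy_denoiser` is an assumption made for the example, not the real model.
import numpy as np

def toy_denoiser(x, sigma):
    # stand-in for the network's prediction of the clean sample x_0
    return x / (1.0 + sigma**2) ** 0.5

def heun_step(x, sigma, sigma_next):
    d = (x - toy_denoiser(x, sigma)) / sigma          # 1st-order derivative
    x_euler = x + d * (sigma_next - sigma)            # Euler (1st-order) proposal
    if sigma_next == 0:                               # final step stays 1st order
        return x_euler
    d_next = (x_euler - toy_denoiser(x_euler, sigma_next)) / sigma_next
    return x + 0.5 * (d + d_next) * (sigma_next - sigma)  # Heun correction

x = np.random.randn(4)
x = heun_step(x, sigma=10.0, sigma_next=5.0)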
| 325
|
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = [0 for i in range(r + 1 )]
# nc0 = 1
_SCREAMING_SNAKE_CASE : Optional[int] = 1
for i in range(1, n + 1 ):
# to compute current row from previous row.
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(__lowerCamelCase, __lowerCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
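# Illustration (added): the inner loop applies Pascal's rule
# C(n, r) = C(n-1, r-1) + C(n-1, r) in place, iterating right to left so the
# whole row is built from the previous one in O(r) space. The printed value
# above should be 252, which `math.comb` confirms:
import math
assert math.comb(10, 5) == 252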
| 325
| 1
|
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_SCREAMING_SNAKE_CASE : set[int] = set()
return any(
node not in visited and depth_first_search(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for node in graph )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
visited.add(__lowerCamelCase )
rec_stk.add(__lowerCamelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__lowerCamelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
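# Illustration (added): adjacency-list inputs for the cycle checker above.
# The call name below is an assumption for the demo; the first top-level
# function plays the role of `check_cycle(graph)`.
cyclic_graph = {0: [1], 1: [2], 2: [0]}      # back edge 2 -> 0 closes a cycle
acyclic_graph = {0: [1], 1: [2], 2: []}
# check_cycle(cyclic_graph)   -> True  (node 0 is found on the recursion stack)
# check_cycle(acyclic_graph)  -> False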
| 325
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
| 325
| 1
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse in Python using linspace()
UpperCamelCase__ =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ =[0, 25, 50]
UpperCamelCase__ =[25, 50, 75]
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ =np.ones(75)
UpperCamelCase__ =np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement(A) = 1 - µA(x)
UpperCamelCase__ =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), 1 - µB(x))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
UpperCamelCase__ =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ =young * middle_aged
# 7. Bounded Sum = min[1, µA(x) + µB(x)]
UpperCamelCase__ =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, µA(x) - µB(x)]
UpperCamelCase__ =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
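# Illustration (added): the skfuzzy calls above reduce to elementwise NumPy
# operations on the membership arrays; a self-contained check on toy values:
import numpy as np
m_a = np.array([0.0, 0.4, 1.0, 0.4, 0.0])
m_b = np.array([0.0, 0.0, 0.5, 1.0, 0.5])
assert np.allclose(np.maximum(m_a, m_b), [0.0, 0.4, 1.0, 1.0, 0.5])      # union
assert np.allclose(np.minimum(m_a, m_b), [0.0, 0.0, 0.5, 0.4, 0.0])      # intersection
assert np.allclose(1 - m_a, [1.0, 0.6, 0.0, 0.6, 1.0])                   # complement
assert np.allclose(np.minimum(1, m_a + m_b), [0.0, 0.4, 1.0, 1.0, 0.5])  # bounded sum
assert np.allclose(np.maximum(0, m_a - m_b), [0.0, 0.4, 0.5, 0.0, 0.0])  # bounded diff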
| 325
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'timesformer'
def __init__( self , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=8 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-6 , __lowerCamelCase=True , __lowerCamelCase="divided_space_time" , __lowerCamelCase=0 , **__lowerCamelCase , ) -> List[str]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : str = num_frames
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
_SCREAMING_SNAKE_CASE : Tuple = attention_type
_SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate
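# Illustration (added): the class above mirrors `TimesformerConfig` from
# transformers; with the real class, the same defaults can be overridden at
# construction time. A minimal sketch (requires transformers installed):
from transformers import TimesformerConfig
config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
assert config.num_frames == 16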
| 325
| 1
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 325
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'spiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCamelCase__ ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCamelCase__ ='▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
# Mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
if self.remove_space:
_SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs
_SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[str] = ""
_SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
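# Illustration (added): the normalisation done by `preprocess_text` above,
# replayed step by step in plain Python (no SentencePiece model needed):
import unicodedata
text = "  Héllo ``world''  "
out = " ".join(text.strip().split())                       # remove_space
out = out.replace("``", '"').replace("''", '"')            # quote fixup
out = "".join(c for c in unicodedata.normalize("NFKD", out)
              if not unicodedata.combining(c))             # strip accents
out = out.lower()                                          # do_lower_case
assert out == 'hello "world"'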
| 325
| 1
|
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = generate_pascal_triangle(__lowerCamelCase )
for row_idx in range(__lowerCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx], end=" " )
else:
print(triangle[row_idx][col_idx], end="" )
print()
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_SCREAMING_SNAKE_CASE : list[list[int]] = []
for current_row_idx in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = populate_current_row(__lowerCamelCase, __lowerCamelCase )
triangle.append(__lowerCamelCase )
return triangle
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = 1, 1
for current_col_idx in range(1, __lowerCamelCase ):
calculate_current_element(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
return current_row
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
_SCREAMING_SNAKE_CASE : Dict = triangle[current_row_idx - 1][current_col_idx - 1]
_SCREAMING_SNAKE_CASE : List[Any] = triangle[current_row_idx - 1][current_col_idx]
_SCREAMING_SNAKE_CASE : List[Any] = above_to_left_elt + above_to_right_elt
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_SCREAMING_SNAKE_CASE : list[list[int]] = [[1]]
for row_index in range(1, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [0] + result[-1] + [0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = row_index + 1
# Calculate the number of distinct elements in a row
_SCREAMING_SNAKE_CASE : str = sum(divmod(__lowerCamelCase, 2 ) )
_SCREAMING_SNAKE_CASE : Tuple = [
temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1 )
]
_SCREAMING_SNAKE_CASE : Tuple = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_SCREAMING_SNAKE_CASE : str = row_first_half + row_second_half
result.append(__lowerCamelCase )
return result
def lowerCamelCase__ ():
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCamelCase, __lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : List[str] = f"""{func.__name__}({value})"""
_SCREAMING_SNAKE_CASE : List[str] = timeit(f"""__main__.{call}""", setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCamelCase, __lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 325
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 325
| 1
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(__lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# set torch weights for 1-to-1 comparison
_SCREAMING_SNAKE_CASE : str = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE : int = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(__lowerCamelCase ).view(-1, __lowerCamelCase ).contiguous().transpose(0, 1 ), )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# set torch weights for 1-to-1 comparison
_SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE : Dict = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE : int = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.self_attention.key, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(__lowerCamelCase ).view(-1, __lowerCamelCase ).contiguous().transpose(0, 1 ), )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# layernorm 1
_SCREAMING_SNAKE_CASE : Optional[Any] = weights[0][0][0]
_SCREAMING_SNAKE_CASE : Any = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE : List[Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ), )
# lsh weights + output
_SCREAMING_SNAKE_CASE : Dict = weights[0][1]
if len(__lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(__lowerCamelCase, torch_block.attention, __lowerCamelCase )
else:
set_layer_weights_in_torch_local(__lowerCamelCase, torch_block.attention, __lowerCamelCase )
# intermediate weights
_SCREAMING_SNAKE_CASE : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowerCamelCase ) == 4:
_SCREAMING_SNAKE_CASE : List[str] = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE : List[str] = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE : List[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ), )
# intermediate dense
_SCREAMING_SNAKE_CASE : List[Any] = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE : str = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(__lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(__lowerCamelCase ), )
# intermediate out
_SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE : List[str] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(__lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(__lowerCamelCase ), )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# reformer model
_SCREAMING_SNAKE_CASE : List[str] = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE : str = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(__lowerCamelCase ), )
if isinstance(weights[3], __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE : Tuple = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.tensor(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE : Any = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# output layer norm
_SCREAMING_SNAKE_CASE : int = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE : Dict = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ), )
# output embeddings
_SCREAMING_SNAKE_CASE : Dict = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE : Dict = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(__lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(__lowerCamelCase ), )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# Initialise PyTorch model
_SCREAMING_SNAKE_CASE : int = ReformerConfig.from_json_file(__lowerCamelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ReformerModelWithLMHead(__lowerCamelCase )
with open(__lowerCamelCase, "rb" ) as f:
_SCREAMING_SNAKE_CASE : List[str] = pickle.load(__lowerCamelCase )["weights"]
set_model_weights_in_torch(__lowerCamelCase, __lowerCamelCase, config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), __lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase__ =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
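# Illustration (added): the core move of the `set_param` helper above --
# copying an external weight into a torch layer after a shape check. A
# minimal, self-contained sketch:
import torch
from torch import nn
layer = nn.Linear(4, 3, bias=False)
new_w = torch.randn(3, 4)
assert layer.weight.shape == new_w.shape, "layer.weight does not match"
layer.weight = nn.Parameter(new_w)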
| 325
|
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 325
| 1
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = parent
_SCREAMING_SNAKE_CASE : Dict = 1_3
_SCREAMING_SNAKE_CASE : str = 7
_SCREAMING_SNAKE_CASE : List[Any] = 3_0
_SCREAMING_SNAKE_CASE : List[Any] = self.seq_length + self.mem_len
_SCREAMING_SNAKE_CASE : Any = 1_5
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : str = 9_9
_SCREAMING_SNAKE_CASE : Union[str, Any] = [1_0, 5_0, 8_0]
_SCREAMING_SNAKE_CASE : Optional[int] = 3_2
_SCREAMING_SNAKE_CASE : Union[str, Any] = 3_2
_SCREAMING_SNAKE_CASE : List[Any] = 4
_SCREAMING_SNAKE_CASE : Optional[int] = 8
_SCREAMING_SNAKE_CASE : Dict = 1_2_8
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : int = None
_SCREAMING_SNAKE_CASE : Any = 1
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : int = 3
_SCREAMING_SNAKE_CASE : Tuple = self.vocab_size - 1
_SCREAMING_SNAKE_CASE : Dict = 0.01
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCamelCase_ ( self ) -> Any:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = TFTransfoXLModel(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ).to_tuple()
_SCREAMING_SNAKE_CASE : List[Any] = {"input_ids": input_ids_a, "mems": mems_a}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = model(__lowerCamelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : List[str] = TFTransfoXLLMHeadModel(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase ).to_tuple()
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"input_ids": input_ids_a, "labels": lm_labels}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase ).to_tuple()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = model([input_ids_a, mems_a] ).to_tuple()
_SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : str = TFTransfoXLForSequenceClassification(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) : List[str] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__snake_case = () if is_tf_available() else ()
__snake_case = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = TFTransfoXLModelTester(self )
_SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=__lowerCamelCase , d_embed=3_7 )
def UpperCamelCase_ ( self ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self.model_tester.set_seed()
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
self.model_tester.set_seed()
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : str = model_class(__lowerCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings()
assert isinstance(__lowerCamelCase , tf.keras.layers.Layer )
_SCREAMING_SNAKE_CASE : Optional[int] = model.get_bias()
assert name is None
else:
_SCREAMING_SNAKE_CASE : Tuple = model.get_output_embeddings()
assert x is None
_SCREAMING_SNAKE_CASE : List[Any] = model.get_bias()
assert name is None
def UpperCamelCase_ ( self ) -> List[str]:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCamelCase_ ( self ) -> int:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Tuple = TFTransfoXLModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def UpperCamelCase_ ( self ) -> Dict:
pass
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
_SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_SCREAMING_SNAKE_CASE : Tuple = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_SCREAMING_SNAKE_CASE : str = model.generate(__lowerCamelCase , max_length=2_0_0 , do_sample=__lowerCamelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase )
| 325
|
from __future__ import annotations
import math
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__lowerCamelCase ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
return min(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 34423]
_SCREAMING_SNAKE_CASE : Tuple = math.log(len(__lowerCamelCase ), 2 )
print("Optimal value : ", end="" )
print(minimax(0, 0, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 325
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase__ ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
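# Illustration (added): the lazy-import idea behind `_LazyModule`, sketched
# with a module-level __getattr__ (PEP 562). Names below are assumptions for
# the demo, not transformers' actual implementation.
import importlib

_demo_import_structure = {"tokenization_gpt_sw3": ["GPTSw3Tokenizer"]}

def __getattr__(name):
    for submodule, exported in _demo_import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")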
| 325
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
_SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119, preview=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
# Loop to check the observed code; stop when indentation diminishes or if we see an `# End copy` comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
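# For reference, the comment syntax this checker parses looks like (illustrative example):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# where the optional "with old->new" suffix (plus an "all-casing" option) feeds the
# replace_pattern handling above.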
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
from __future__ import annotations
from collections.abc import Iterator
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : Optional[int] = value
_SCREAMING_SNAKE_CASE : Node | None = None
_SCREAMING_SNAKE_CASE : Node | None = None
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = tree
def UpperCamelCase_ ( self , __lowerCamelCase ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
UpperCamelCase__ =Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
UpperCamelCase__ ={'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
UpperCamelCase__ ='zero2'
UpperCamelCase__ ='zero3'
UpperCamelCase__ =[ZEROa, ZEROa]
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_SCREAMING_SNAKE_CASE : Dict = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
UpperCamelCase__ =list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@parameterized.expand(__lowerCamelCase , name_func=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Any:
self.run_and_check(
stage=__lowerCamelCase , model=__lowerCamelCase , distributed=__lowerCamelCase , fpaa=__lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(__lowerCamelCase , name_func=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
self.run_and_check(
stage=__lowerCamelCase , model=__lowerCamelCase , distributed=__lowerCamelCase , fpaa=__lowerCamelCase , )
@parameterized.expand(__lowerCamelCase , name_func=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
self.run_and_check(
stage=__lowerCamelCase , model=__lowerCamelCase , distributed=__lowerCamelCase , fpaa=__lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(__lowerCamelCase , name_func=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
self.run_and_check(
stage=__lowerCamelCase , model=__lowerCamelCase , distributed=__lowerCamelCase , fpaa=__lowerCamelCase , )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1_0 , __lowerCamelCase = True , __lowerCamelCase = True , __lowerCamelCase = True , ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = models[model]
_SCREAMING_SNAKE_CASE : Tuple = self.run_trainer(
stage=__lowerCamelCase , model_name=__lowerCamelCase , eval_steps=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , fpaa=__lowerCamelCase , )
self.do_checks(__lowerCamelCase )
return output_dir
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1_0 , __lowerCamelCase = 1 , __lowerCamelCase = True , __lowerCamelCase = True , ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir("./xxx" , after=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_SCREAMING_SNAKE_CASE : Union[str, Any] = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
_SCREAMING_SNAKE_CASE : Union[str, Any] = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_launcher(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
return output_dir
def UpperCamelCase_ ( self , __lowerCamelCase=False ) -> str:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
_SCREAMING_SNAKE_CASE : Dict = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
from timeit import timeit
def lowerCamelCase__ (__lowerCamelCase ):
if number < 0:
raise ValueError("the value of input must not be negative" )
_SCREAMING_SNAKE_CASE : str = 0
while number:
number &= number - 1
result += 1
return result
def lowerCamelCase__ (__lowerCamelCase ):
if number < 0:
raise ValueError("the value of input must not be negative" )
_SCREAMING_SNAKE_CASE : str = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
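# Worked example: 25 = 0b11001 has three set bits. Brian Kernighan's trick clears the
# lowest set bit on each iteration (0b11001 -> 0b11000 -> 0b10000 -> 0), so it loops
# only three times, while the modulo/shift version loops once per bit position.
# A self-contained sanity check, independent of the helpers above:
assert bin(25).count("1") == 3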
def lowerCamelCase__ ():
def do_benchmark(__lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : Tuple = "import __main__ as z"
print(f"""Benchmark when {number = }:""" )
print(f"""{get_set_bits_count_using_modulo_operator(__lowerCamelCase ) = }""" )
_SCREAMING_SNAKE_CASE : str = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=__lowerCamelCase )
print(f"""timeit() runs in {timing} seconds""" )
print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(__lowerCamelCase ) = }""" )
_SCREAMING_SNAKE_CASE : int = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=__lowerCamelCase, )
print(f"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
from PIL import Image
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = (259 * (level + 255)) / (255 * (259 - level))
def contrast(__lowerCamelCase ) -> int:
return int(128 + factor * (c - 128) )
return img.point(__lowerCamelCase )
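# Worked example (approximate values): for level=170 the factor is
# 259 * 425 / (255 * 89) ≈ 4.85, so contrast(128) stays at 128 (the pivot) while
# contrast(150) ≈ 128 + 4.85 * 22 ≈ 234; PIL clips results outside the 8-bit range.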
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
UpperCamelCase__ =change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ ={
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse using np.linspace()
UpperCamelCase__ =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ =[0, 25, 50]
UpperCamelCase__ =[25, 50, 75]
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ =np.ones(75)
UpperCamelCase__ =np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
UpperCamelCase__ =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
UpperCamelCase__ =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ =young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCamelCase__ =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
UpperCamelCase__ =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
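# Sanity check near x = 25 (approximate, since the linspace grid need not hit 25
# exactly): young = trimf(X, [0, 25, 50]) peaks at ~1 there while
# middle_aged = trimf(X, [25, 50, 75]) is still ~0, so union ~1 and intersection ~0.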
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "file.csv"
_SCREAMING_SNAKE_CASE : Union[str, Any] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(__lowerCamelCase, "w" ) as f:
f.write(__lowerCamelCase )
return str(__lowerCamelCase )
@pytest.fixture
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = tmp_path / "malformed_file.csv"
_SCREAMING_SNAKE_CASE : int = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(__lowerCamelCase, "w" ) as f:
f.write(__lowerCamelCase )
return str(__lowerCamelCase )
@pytest.fixture
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = tmp_path / "csv_with_image.csv"
_SCREAMING_SNAKE_CASE : Optional[Any] = textwrap.dedent(
f"""\
image
{image_file}
""" )
with open(__lowerCamelCase, "w" ) as f:
f.write(__lowerCamelCase )
return str(__lowerCamelCase )
@pytest.fixture
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "csv_with_label.csv"
_SCREAMING_SNAKE_CASE : Tuple = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(__lowerCamelCase, "w" ) as f:
f.write(__lowerCamelCase )
return str(__lowerCamelCase )
@pytest.fixture
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = tmp_path / "csv_with_int_list.csv"
_SCREAMING_SNAKE_CASE : Dict = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(__lowerCamelCase, "w" ) as f:
f.write(__lowerCamelCase )
return str(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = Csv()
_SCREAMING_SNAKE_CASE : List[str] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(__lowerCamelCase, match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(__lowerCamelCase ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase__ (__lowerCamelCase ):
with open(__lowerCamelCase, encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE : Optional[Any] = f.read().splitlines()[1]
_SCREAMING_SNAKE_CASE : int = Csv(encoding="utf-8", features=Features({"image": Image()} ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = csv._generate_tables([[csv_file_with_image]] )
_SCREAMING_SNAKE_CASE : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_SCREAMING_SNAKE_CASE : Dict = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase__ (__lowerCamelCase ):
with open(__lowerCamelCase, encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE : Any = f.read().splitlines()[1:]
_SCREAMING_SNAKE_CASE : Any = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_SCREAMING_SNAKE_CASE : Tuple = csv._generate_tables([[csv_file_with_label]] )
_SCREAMING_SNAKE_CASE : List[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_SCREAMING_SNAKE_CASE : Optional[int] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
_SCREAMING_SNAKE_CASE : Union[str, Any] = csv._generate_tables([[csv_file_with_int_list]] )
_SCREAMING_SNAKE_CASE : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_SCREAMING_SNAKE_CASE : int = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
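# Note: the loop above runs the DDIM update rule in reverse over flipped timesteps,
# recovering the noise tensor that would generate the given mel-spectrogram images;
# that noise can then be interpolated with slerp() below.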
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
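# This is spherical linear interpolation (slerp): writing the three arguments as x0,
# x1 and alpha, with theta the angle between the two flattened tensors, the result is
# sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
# interpolating along the great circle between two noise tensors rather than along a
# straight line.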
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoints.load_tax_checkpoint(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = flatten_dict(__lowerCamelCase )
return flax_params
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = {}
_SCREAMING_SNAKE_CASE : Tuple = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
_SCREAMING_SNAKE_CASE : str = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_SCREAMING_SNAKE_CASE : List[str] = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_SCREAMING_SNAKE_CASE : List[str] = new_key.replace(__lowerCamelCase, __lowerCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_SCREAMING_SNAKE_CASE : int = new_key.replace(__lowerCamelCase, __lowerCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_SCREAMING_SNAKE_CASE : Optional[int] = re.sub(R"layers_(\d+)", R"layer.\1", __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = new_key.replace("encoder", "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_SCREAMING_SNAKE_CASE : Optional[int] = re.sub(R"layers_(\d+)", R"layer.\1", __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = flax_dict[key]
_SCREAMING_SNAKE_CASE : int = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_SCREAMING_SNAKE_CASE : int = torch.from_numpy(converted_dict[key].T )
else:
_SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
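# Illustrative renaming (assumed key, for intuition only): a flax key such as
# ("target", "encoder", "layers_0", "attention", "query", "kernel") is joined to
# "encoder.layers_0.attention.query.kernel", then mapped step by step to
# "encoder.encoder.layer.0.attention.query.weight" before the tensors are converted
# to torch.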
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : int = get_flax_param(__lowerCamelCase )
if not use_large:
_SCREAMING_SNAKE_CASE : Any = PixaStructVisionConfig()
_SCREAMING_SNAKE_CASE : Optional[int] = PixaStructTextConfig()
else:
_SCREAMING_SNAKE_CASE : Any = PixaStructVisionConfig(
hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 )
_SCREAMING_SNAKE_CASE : Tuple = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18 )
_SCREAMING_SNAKE_CASE : List[str] = PixaStructConfig(
vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = PixaStructForConditionalGeneration(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = rename_and_convert_flax_params(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[int] = PixaStructImageProcessor()
_SCREAMING_SNAKE_CASE : List[str] = PixaStructProcessor(image_processor=__lowerCamelCase, tokenizer=__lowerCamelCase )
if use_large:
_SCREAMING_SNAKE_CASE : Dict = 4096
_SCREAMING_SNAKE_CASE : Optional[Any] = True
# mkdir if needed
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
print("Model saved in {}".format(__lowerCamelCase ) )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Whether the model is a VQA model.')
UpperCamelCase__ =parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
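# Worked example: with max_perimeter=12 the only integer right triangle is (3, 4, 5),
# whose perimeter is 12, so the counter holds {12: 1}; for the Project Euler #39
# search up to 1000, the perimeter with the most solutions is 840.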
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = 0 ):
_SCREAMING_SNAKE_CASE : Dict = length or len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = list_data[i + 1], list_data[i]
_SCREAMING_SNAKE_CASE : str = True
return list_data if not swapped else lowerCamelCase__(__lowerCamelCase, length - 1 )
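# Worked example: one pass over [3, 1, 2] bubbles 3 past both neighbours, giving
# [1, 2, 3]; since a swap happened, the function recurses with length - 1, and the
# second pass finds no swaps and returns the sorted list.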
if __name__ == "__main__":
import doctest
doctest.testmod()
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# ===== initialization =====
_SCREAMING_SNAKE_CASE : List[Any] = Mock()
_SCREAMING_SNAKE_CASE : Optional[Any] = conn, Mock()
_SCREAMING_SNAKE_CASE : Dict = iter([1, None] )
_SCREAMING_SNAKE_CASE : Optional[Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
# ===== invoke =====
send_file(filename="mytext.txt", testing=__lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_0_0_0 , __lowerCamelCase=[3, 3, 6, 4] , __lowerCamelCase=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Any:
_SCREAMING_SNAKE_CASE : List[Any] = parent
_SCREAMING_SNAKE_CASE : Dict = batch_size
_SCREAMING_SNAKE_CASE : Optional[int] = num_channels
_SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
_SCREAMING_SNAKE_CASE : Any = use_labels
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
_SCREAMING_SNAKE_CASE : List[Any] = image_size
_SCREAMING_SNAKE_CASE : List[str] = layer_depths
_SCREAMING_SNAKE_CASE : Tuple = embed_dims
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1E-5 , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[str] = self.num_labels
_SCREAMING_SNAKE_CASE : Union[str, Any] = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_SCREAMING_SNAKE_CASE : Dict = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__snake_case = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = SwiftFormerModelTester(self )
_SCREAMING_SNAKE_CASE : List[str] = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def UpperCamelCase_ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : str = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : List[str] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Dict = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def UpperCamelCase_ ( self ) -> int:
pass
def UpperCamelCase_ ( self ) -> Dict:
def check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[int] = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )  # eight feature maps: two per stage across four stages
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
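# A worked illustration (hypothetical numbers, not taken from this test): with
# image_size=224 and embed_dims=[48, 56, 112, 220], the eight hidden states come
# in pairs per stage, e.g. stages 0-1 -> (B, 48, 56, 56), stages 2-3 ->
# (B, 56, 28, 28), and so on, halving H and W after every two blocks.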
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
def _config_zero_init(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1E-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
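# Rationale sketch: with every init range/std forced to ~1e-10 above, weights
# drawn from the usual random initializers round to 0.0, while constant
# initializations (e.g. norm scales) stay at 1.0 - hence the [0.0, 1.0]
# membership check below.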
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : str = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : int = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase_ ( self ) -> int:
pass
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> Any:
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
_SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**__lowerCamelCase )
# verify the logits
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
| 325
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
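# A hedged sketch of the output above (key names follow the upstream
# InstructBLIP processor and are not spelled out in this file): text inputs
# yield "input_ids"/"attention_mask", the Q-Former branch yields
# "qformer_input_ids"/"qformer_attention_mask", and images yield "pixel_values".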
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
| 325
| 1
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
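# Shape sketch for the mask above (illustrative): given query_input of shape
# (batch, q_len) and key_input of shape (batch, k_len),
#   torch.mul(q.unsqueeze(-1), k.unsqueeze(-2)) -> (batch, q_len, k_len)
#   mask.unsqueeze(-3)                          -> (batch, 1, q_len, k_len)
# i.e. a broadcastable attention mask with a singleton heads dimension.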
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
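# Contract sketch for the forward pass above: `encodings_and_masks` is a list
# of (encoder_hidden_states, encoder_mask) pairs; each mask is lifted to an
# encoder-decoder mask, then all encodings and masks are concatenated along
# the sequence axis so every decoder layer cross-attends to one fused context.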
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_b(__lowerCamelCase )  # second (linear) projection of the gated feed-forward
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
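# FiLM modulation sketch (illustrative shapes): `conditioning` of size
# (batch, 1, in_features) is projected to 2 * out_features, split into
# (scale, shift), and applied as x * (1 + scale) + shift - an affine
# feature-wise transform driven by the conditioning embedding.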
| 325
|
from maths.prime_check import is_prime
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
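# Doctest-style sketch of the twin-prime function above (values follow the
# standard definition; the function name is as transformed in this file):
#   lowerCamelCase__(3) -> 5   (3 and 5 are twin primes)
#   lowerCamelCase__(4) -> -1  (4 is not prime)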
| 325
| 1
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 42
__snake_case = None
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=0.999, __lowerCamelCase="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_SCREAMING_SNAKE_CASE : int = []
for i in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = i / num_diffusion_timesteps
_SCREAMING_SNAKE_CASE : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCamelCase ) / alpha_bar_fn(__lowerCamelCase ), __lowerCamelCase ) )
return torch.tensor(__lowerCamelCase, dtype=torch.floataa )
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
__snake_case = 1
@register_to_config
def __init__( self , __lowerCamelCase = 1_0_0_0 , __lowerCamelCase = 0.0001 , __lowerCamelCase = 0.02 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = True , __lowerCamelCase = 0 , __lowerCamelCase = "epsilon" , __lowerCamelCase = 1.0 , **__lowerCamelCase , ) -> Optional[Any]:
if kwargs.get("set_alpha_to_one" , __lowerCamelCase ) is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , __lowerCamelCase , standard_warn=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = kwargs["set_alpha_to_one"]
if trained_betas is not None:
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_SCREAMING_SNAKE_CASE : str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_SCREAMING_SNAKE_CASE : Tuple = betas_for_alpha_bar(__lowerCamelCase )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = 1.0 - self.betas
_SCREAMING_SNAKE_CASE : Dict = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : str = torch.from_numpy(np.arange(0 , __lowerCamelCase ).copy().astype(np.intaa ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> torch.FloatTensor:
return sample
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
F""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
F""" maximal {self.config.num_train_timesteps} timesteps.""" )
_SCREAMING_SNAKE_CASE : Optional[int] = num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[int] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE : Tuple = (np.arange(0 , __lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
self.timesteps += self.config.steps_offset
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_SCREAMING_SNAKE_CASE : Tuple = self.alphas_cumprod[timestep]
_SCREAMING_SNAKE_CASE : List[Any] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Tuple = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_SCREAMING_SNAKE_CASE : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_SCREAMING_SNAKE_CASE : List[str] = model_output
elif self.config.prediction_type == "sample":
_SCREAMING_SNAKE_CASE : str = model_output
_SCREAMING_SNAKE_CASE : Tuple = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_SCREAMING_SNAKE_CASE : Any = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_SCREAMING_SNAKE_CASE : Any = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_SCREAMING_SNAKE_CASE : str = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_SCREAMING_SNAKE_CASE : Union[str, Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase )
def __len__( self ) -> Dict:
return self.config.num_train_timesteps
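# Hedged usage sketch (names are illustrative, not defined in this file):
# inverting latents with a scheduler like the one above.
#
#   scheduler.set_timesteps(num_inference_steps=50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample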
| 325
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ (__lowerCamelCase ):
return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=__lowerCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=__lowerCamelCase )
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = model
_SCREAMING_SNAKE_CASE : Optional[int] = cache
_SCREAMING_SNAKE_CASE : str = force
_SCREAMING_SNAKE_CASE : str = trust_remote_code
def UpperCamelCase_ ( self ) -> Optional[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 325
| 1
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# ===== initialization =====
_SCREAMING_SNAKE_CASE : List[Any] = Mock()
_SCREAMING_SNAKE_CASE : Optional[Any] = conn, Mock()
_SCREAMING_SNAKE_CASE : Dict = iter([1, None] )
_SCREAMING_SNAKE_CASE : Optional[Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
# ===== invoke =====
send_file(filename="mytext.txt", testing=__lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 325
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
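# Idea sketch for the check above: with a KV cache, decoding the 3 appended
# tokens via `past_key_values` must match a full forward pass over the
# concatenated sequence, so a random slice of the last 3 positions is compared
# between the cached and uncached outputs (rtol=1e-3).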
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
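# Mask-construction sketch for the helper above: when absent, attention masks
# are derived as (token != pad_token_id); the decoder mask additionally forces
# the first position to 1 so the decoder start token is always attended to.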
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 325
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'spiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCamelCase__ ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCamelCase__ ='▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
if self.remove_space:
_SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs
_SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
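# Worked sketch of the branch above (illustrative): SentencePiece may emit a
# piece like "▁1940," with the comma glued on; the code re-encodes "▁1940"
# without the underline marker, strips a spuriously added leading "▁" if one
# appears, and appends "," as its own token, so "1940," -> ["▁1940", ","].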
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[str] = ""
_SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
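# Example sketch for the mask above: a single 3-token sequence yields
# [1, 0, 0, 0, 1] ([CLS] t t t [SEP]); a pair of 2- and 1-token sequences
# yields [1, 0, 0, 1, 0, 1].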
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 325
|
from math import isqrt, loga
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, __lowerCamelCase, i ):  # step by the prime i, not by max_number
_SCREAMING_SNAKE_CASE : Optional[Any] = False
return [i for i in range(2, __lowerCamelCase ) if is_prime[i]]
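# Sieve sketch: for max_number=10 the function above returns [2, 3, 5, 7];
# each prime i strikes out its multiples i*i, i*i + i, ... below max_number.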
def lowerCamelCase__ (__lowerCamelCase = 800800, __lowerCamelCase = 800800 ):
_SCREAMING_SNAKE_CASE : Optional[int] = degree * loga(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = int(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = calculate_prime_numbers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Dict = len(__lowerCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
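# Counting sketch for the loop above: p^q * q^p <= base^degree iff
# q*log(p) + p*log(q) <= degree*log(base); with the primes sorted, once the
# right pointer is pulled back to the last valid partner for the current left
# prime, every index in between forms a valid pair, so `right - left` hybrid
# integers are added per step.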
if __name__ == "__main__":
print(f"{solution() = }")
| 325
| 1
|
import argparse
import os
import re
import packaging.version
UpperCamelCase__ ='examples/'
UpperCamelCase__ ={
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase__ ={
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
UpperCamelCase__ ='README.md'
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f.read()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = REPLACE_PATTERNS[pattern]
_SCREAMING_SNAKE_CASE : Any = replace.replace("VERSION", __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = re_pattern.sub(__lowerCamelCase, __lowerCamelCase )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.write(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase ):
for folder, directories, fnames in os.walk(__lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, pattern="examples" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if not patch:
update_version_in_examples(__lowerCamelCase )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Union[str, Any] = "🤗 Transformers currently provides the following architectures"
_SCREAMING_SNAKE_CASE : Any = "1. Want to contribute a new model?"
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : List[str] = f.readlines()
# Find the start of the list.
_SCREAMING_SNAKE_CASE : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_SCREAMING_SNAKE_CASE : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
_SCREAMING_SNAKE_CASE : List[Any] = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
index += 1
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
def lowerCamelCase__ ():
with open(REPLACE_FILES["init"], "r" ) as f:
_SCREAMING_SNAKE_CASE : List[Any] = f.read()
_SCREAMING_SNAKE_CASE : Dict = REPLACE_PATTERNS["init"][0].search(__lowerCamelCase ).groups()[0]
return packaging.version.parse(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
_SCREAMING_SNAKE_CASE : Dict = default_version.base_version
elif patch:
_SCREAMING_SNAKE_CASE : List[Any] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_SCREAMING_SNAKE_CASE : str = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_SCREAMING_SNAKE_CASE : List[str] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(__lowerCamelCase ) == 0:
_SCREAMING_SNAKE_CASE : Optional[Any] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(__lowerCamelCase, patch=__lowerCamelCase )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : List[Any] = get_version()
_SCREAMING_SNAKE_CASE : str = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_SCREAMING_SNAKE_CASE : Any = current_version.base_version
# Check with the user we got that right.
_SCREAMING_SNAKE_CASE : str = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(__lowerCamelCase ) == 0:
_SCREAMING_SNAKE_CASE : List[Any] = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(__lowerCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase__ =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 325
|
from math import factorial
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(__lowerCamelCase ) // (factorial(__lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
| 325
| 1
|
from __future__ import annotations
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# 1) Construct the failure array for the pattern
    _SCREAMING_SNAKE_CASE : str = get_failure_array(__lowerCamelCase )
    # 2) Step through the text searching for the pattern
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = 0, 0 # index into text, pattern
while i < len(__lowerCamelCase ):
if pattern[j] == text[i]:
if j == (len(__lowerCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = failure[j - 1]
continue
i += 1
return False
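# Failure-jump sketch: matching "ABABX" against "ABABZ...", the mismatch at
# 'X' vs 'Z' with j == 4 falls back to failure[3] == 2, reusing the already
# matched "AB" prefix instead of restarting the pattern from j == 0.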
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = [0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : int = 1
while j < len(__lowerCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_SCREAMING_SNAKE_CASE : str = failure[i - 1]
continue
j += 1
failure.append(__lowerCamelCase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCamelCase__ ='abc1abc12'
UpperCamelCase__ ='alskfjaldsabc1abc1abc12k23adsfabcabc'
UpperCamelCase__ ='alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase__ ='ABABX'
UpperCamelCase__ ='ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
UpperCamelCase__ ='AAAB'
UpperCamelCase__ ='ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
UpperCamelCase__ ='abcdabcy'
UpperCamelCase__ ='abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
UpperCamelCase__ ='aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 325
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(self, input_dims=1_2_8, targets_length=2_5_6, max_decoder_noise_time=2000.0, d_model=7_6_8, num_layers=1_2, num_heads=1_2, d_kv=6_4, d_ff=2_0_4_8, dropout_rate=0.1, ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input) -> torch.Tensor:
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
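    # The mask above is an outer product: mask[b, i, j] = query_input[b, i] * key_input[b, j],
    # with a singleton dimension inserted so it broadcasts across attention heads.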
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1E-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) )
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1E10).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # Two parallel input projections for the gated activation, plus an output projection.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    '''simple docstring'''
    def __init__(self, hidden_size, eps=1E-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467; the variance is therefore
        # computed without the mean and there is no bias. Additionally, the accumulation for
        # half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back into half precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
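    # In equation form, this is RMSNorm (a sketch of the computation above):
    #     y = weight * x / sqrt(mean(x ** 2, dim=-1) + eps)
    # i.e. activations are rescaled by their root mean square rather than being
    # mean-centered first as in standard LayerNorm.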
class NewGELUActivation(nn.Module):
    '''simple docstring'''
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.04_4715 * torch.pow(input, 3.0))))
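    # This is the tanh approximation of GELU:
    #     gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x ** 3)))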
class TaFiLMLayer(nn.Module):
    '''simple docstring'''
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
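    # FiLM (feature-wise linear modulation): a single linear layer maps the
    # conditioning embedding to 2 * out_features values, which are split into a
    # per-feature scale and shift applied as x * (1 + scale) + shift.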
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=1_3, num_channels=3, image_size=3_2, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1_0_2_4, output_stride=3_2, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=1_0, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 2_6
            self.assertEqual(len(hidden_states), expected_num_stages)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_1))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
def binomial_coefficient(n, r):
    # c[j] will hold "n choose j"; only a single row of Pascal's triangle is kept.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
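# Each pass of the outer loop turns c into row i of Pascal's triangle (truncated
# at r), via the identity C(i, j) = C(i - 1, j) + C(i - 1, j - 1); sweeping j from
# right to left keeps the previous row's entries intact until they are consumed.
assert binomial_coefficient(5, 2) == 10
assert binomial_coefficient(10, 5) == 252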
print(binomial_coefficient(n=10, r=5))
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often, splitting large files into smaller files can prevent the tokenizer from going out of memory.'
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'}, )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'}, )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether or not to use whole word masking.'} )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in blocks of this size for training. '
                'Defaults to the model max input length for single sentence inputs (taking into account special tokens).'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
_SCREAMING_SNAKE_CASE : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=__lowerCamelCase, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
else:
if data_args.mlm and data_args.whole_word_mask:
_SCREAMING_SNAKE_CASE : int = DataCollatorForWholeWordMask(
tokenizer=__lowerCamelCase, mlm_probability=data_args.mlm_probability )
else:
_SCREAMING_SNAKE_CASE : int = DataCollatorForLanguageModeling(
tokenizer=__lowerCamelCase, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        eval_output = trainer.evaluate()
        # Perplexity is the exponential of the average cross-entropy loss on the eval set.
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
with open(__lowerCamelCase, "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s", __lowerCamelCase, str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(__lowerCamelCase )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
'''simple docstring'''
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None
    def init_retrieval(self, distributed_port):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
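    # In short: every worker sends its question vectors to the main worker (gather),
    # the main worker queries the index once for the whole batch, chunks the doc ids
    # and embeddings per worker, and sends each worker its slice back (scatter).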
class TrieNode:
    '''simple docstring'''
    def __init__(self) -> None:
        self.nodes: dict = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word) -> None:
        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
def print_words(node, word):
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg, passes):
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests():
    assert test_trie()
def main():
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'timesformer'
    def __init__(self, image_size=2_2_4, patch_size=1_6, num_channels=3, num_frames=8, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
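# A minimal usage sketch (instantiation with overridden defaults, using the
# parameter names defined above):
#     config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")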
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = " " ):
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[Any] = 0
for index, char in enumerate(__lowerCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
_SCREAMING_SNAKE_CASE : List[Any] = index + 1
elif index + 1 == len(__lowerCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
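# Note: unlike str.split, this implementation drops a trailing empty field:
# split("a,b,", separator=",") returns ['a', 'b'], while "a,b,".split(",")
# returns ['a', 'b', ''].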
if __name__ == "__main__":
from doctest import testmod
testmod()
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
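        # Example: for a pair (A, B) the model input is laid out as
        # [CLS] A [SEP] B [SEP], so the returned type ids are 0 up to and
        # including the first [SEP] and 1 for the remainder.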
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    '''simple docstring'''
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [6_0]
        w = [1_0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self):
        cap = 5_0
        val = [6_0, 1_0_0, 1_2_0]
        w = [1_0, 2_0, 3_0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 2_2_0)
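        # Sanity check on the expected optimum: the 100- and 120-value items have
        # weights 20 + 30, exactly filling the capacity of 50, for a value of 220.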
if __name__ == "__main__":
unittest.main()
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
UpperCamelCase__ =None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D array" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D array" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D array with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 325
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , "embed_dim" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , "num_heads" ) )
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=6_4 , __lowerCamelCase=3 , __lowerCamelCase=[1_6, 4_8, 9_6] , __lowerCamelCase=[1, 3, 6] , __lowerCamelCase=[1, 2, 1_0] , __lowerCamelCase=[7, 3, 3] , __lowerCamelCase=[4, 2, 2] , __lowerCamelCase=[2, 1, 1] , __lowerCamelCase=[2, 2, 2] , __lowerCamelCase=[False, False, True] , __lowerCamelCase=[0.0, 0.0, 0.0] , __lowerCamelCase=0.02 , __lowerCamelCase=1E-12 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=2 , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = parent
_SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
_SCREAMING_SNAKE_CASE : List[str] = image_size
_SCREAMING_SNAKE_CASE : int = patch_sizes
_SCREAMING_SNAKE_CASE : Optional[int] = patch_stride
_SCREAMING_SNAKE_CASE : Optional[int] = patch_padding
_SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
_SCREAMING_SNAKE_CASE : str = num_labels
_SCREAMING_SNAKE_CASE : Optional[int] = num_channels
_SCREAMING_SNAKE_CASE : List[Any] = embed_dim
_SCREAMING_SNAKE_CASE : List[Any] = num_heads
_SCREAMING_SNAKE_CASE : Union[str, Any] = stride_kv
_SCREAMING_SNAKE_CASE : Any = depth
_SCREAMING_SNAKE_CASE : Any = cls_token
_SCREAMING_SNAKE_CASE : Union[str, Any] = attention_drop_rate
_SCREAMING_SNAKE_CASE : Any = initializer_range
_SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
# create a random int32 tensor of given shape
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ) -> str:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = TFCvtModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , training=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = (self.image_size, self.image_size)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = image_size[0], image_size[1]
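        # Per stage, the spatial size follows the standard convolution formula:
        # out = floor((in + 2 * padding - kernel_size) / stride + 1)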
for i in range(len(self.depth ) ):
_SCREAMING_SNAKE_CASE : Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_SCREAMING_SNAKE_CASE : Dict = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
_SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtForImageClassification(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__snake_case = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = TFCvtModelTester(self )
_SCREAMING_SNAKE_CASE : Tuple = TFCvtConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> str:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def UpperCamelCase_ ( self ) -> List[Any]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCamelCase_ ( self ) -> Optional[int]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(__lowerCamelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFCvtModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_SCREAMING_SNAKE_CASE : int = self.default_image_processor
_SCREAMING_SNAKE_CASE : List[str] = prepare_img()
_SCREAMING_SNAKE_CASE : List[str] = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
_SCREAMING_SNAKE_CASE : Optional[int] = model(**__lowerCamelCase )
# verify the logits
_SCREAMING_SNAKE_CASE : List[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1E-4 ) )
| 325
|
from __future__ import annotations
import math
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__lowerCamelCase ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
return min(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
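# Worked example (a sketch): for scores [90, 23, 6, 33, 21, 65, 123, 34423]
# and height log2(8) = 3, the maximizing root evaluates to
# max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
# = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65.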
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 34423]
_SCREAMING_SNAKE_CASE : Tuple = math.log(len(__lowerCamelCase ), 2 )
print("Optimal value : ", end="" )
print(minimax(0, 0, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 325
| 1
|
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCamelCase__ (__lowerCamelCase="ro", __lowerCamelCase="en", __lowerCamelCase="wmt16", __lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
_SCREAMING_SNAKE_CASE : List[Any] = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = datasets.load_dataset(__lowerCamelCase, __lowerCamelCase )
if save_dir is None:
_SCREAMING_SNAKE_CASE : str = f"""{dataset}-{pair}"""
_SCREAMING_SNAKE_CASE : Tuple = Path(__lowerCamelCase )
save_dir.mkdir(exist_ok=__lowerCamelCase )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
_SCREAMING_SNAKE_CASE : List[Any] = "val" if split == "validation" else split
_SCREAMING_SNAKE_CASE : Any = save_dir.joinpath(f"""{fn}.source""" )
_SCREAMING_SNAKE_CASE : List[Any] = save_dir.joinpath(f"""{fn}.target""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = src_path.open("w+" )
_SCREAMING_SNAKE_CASE : Any = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
_SCREAMING_SNAKE_CASE : List[str] = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
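    # Usage sketch (script filename assumed; fire exposes the keyword arguments
    # of the function above as CLI flags):
    #   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16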
| 325
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
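# Illustrative lines matched by the regexes above (module/class names made up):
#   _re_copy_warning:    "# Copied from diffusers.models.foo.Bar with Bar->Baz"
#   _re_replace_pattern: each "Old->New" clause in the trailing "with ..." part
#   _re_fill_pattern:    "<FILL docstring>" placeholders left in copied code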
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
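    # Indented snippets (e.g. a method body) are not valid top-level Python, so
    # they are wrapped in a dummy `class Bla:` before being handed to black, and
    # the wrapper prefix is stripped from the result afterwards.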
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
_SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119, preview=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
        # Loop to check the observed code: stop when the indentation diminishes or when we see an `# End copy` comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
| 325
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ =get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
UpperCamelCase__ =5
UpperCamelCase__ =10
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = SpeechaTextTokenizer
__snake_case = False
__snake_case = True
def UpperCamelCase_ ( self ) -> List[str]:
super().setUp()
_SCREAMING_SNAKE_CASE : Union[str, Any] = sp.SentencePieceProcessor()
spm_model.Load(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__lowerCamelCase ) )]
_SCREAMING_SNAKE_CASE : str = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
_SCREAMING_SNAKE_CASE : Tuple = Path(self.tmpdirname )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = "<pad>"
_SCREAMING_SNAKE_CASE : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__lowerCamelCase ) , 1_0_0_1 )
def UpperCamelCase_ ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Tuple = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def UpperCamelCase_ ( self ) -> str:
# fmt: off
_SCREAMING_SNAKE_CASE : Optional[int] = {"input_ids": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'valhalla/s2t_mustc_multilinguial_medium'
__snake_case = 'C\'est trop cool'
__snake_case = 'Esto es genial'
@classmethod
def UpperCamelCase_ ( cls ) -> Dict:
_SCREAMING_SNAKE_CASE : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def UpperCamelCase_ ( self ) -> int:
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 1_1 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 )
def UpperCamelCase_ ( self ) -> Any:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
_SCREAMING_SNAKE_CASE : Optional[Any] = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = "fr"
_SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __lowerCamelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
_SCREAMING_SNAKE_CASE : List[Any] = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 325
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
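                # ConvBERT's mixed attention halves the number of self-attention
                # heads (head_ratio defaults to 2), hence num_attention_heads / 2 below.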
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
| 325
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['pixel_values']
def __init__( self , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = PILImageResampling.BICUBIC , __lowerCamelCase = True , __lowerCamelCase = 1 / 2_5_5 , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , **__lowerCamelCase , ) -> None:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
_SCREAMING_SNAKE_CASE : int = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = do_resize
_SCREAMING_SNAKE_CASE : str = size
_SCREAMING_SNAKE_CASE : int = resample
_SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale
_SCREAMING_SNAKE_CASE : Any = rescale_factor
_SCREAMING_SNAKE_CASE : int = do_normalize
_SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_SCREAMING_SNAKE_CASE : Any = image_std if image_std is not None else OPENAI_CLIP_STD
_SCREAMING_SNAKE_CASE : Tuple = do_convert_rgb
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = PILImageResampling.BICUBIC , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_SCREAMING_SNAKE_CASE : List[str] = (size["height"], size["width"])
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> int:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = ChannelDimension.FIRST , **__lowerCamelCase , ) -> PIL.Image.Image:
_SCREAMING_SNAKE_CASE : int = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE : Dict = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE : Dict = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else self.size
_SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_SCREAMING_SNAKE_CASE : Optional[Any] = [convert_to_rgb(__lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE : List[str] = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE : Tuple = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE : Dict = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
_SCREAMING_SNAKE_CASE : Any = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
_SCREAMING_SNAKE_CASE : str = BatchFeature(data={"pixel_values": images} , tensor_type=__lowerCamelCase )
return encoded_outputs
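# Minimal usage sketch (illustrative; shapes assume the default 384x384 size):
#     processor = lowerCAmelCase__()
#     batch = processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 384, 384)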
| 325
|
from timeit import timeit
def lowerCamelCase__ (__lowerCamelCase ):
if number < 0:
raise ValueError("the value of input must not be negative" )
_SCREAMING_SNAKE_CASE : str = 0
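    # Brian Kernighan's trick: `number & (number - 1)` clears the lowest set
    # bit, so the loop body runs exactly once per set bit.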
while number:
number &= number - 1
result += 1
return result
def lowerCamelCase__ (__lowerCamelCase ):
if number < 0:
raise ValueError("the value of input must not be negative" )
_SCREAMING_SNAKE_CASE : str = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def lowerCamelCase__ ():
def do_benchmark(__lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : Tuple = "import __main__ as z"
print(f"""Benchmark when {number = }:""" )
print(f"""{get_set_bits_count_using_modulo_operator(__lowerCamelCase ) = }""" )
        _SCREAMING_SNAKE_CASE : str = timeit(f"""z.get_set_bits_count_using_modulo_operator({number})""", setup=__lowerCamelCase )
print(f"""timeit() runs in {timing} seconds""" )
print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(__lowerCamelCase ) = }""" )
        _SCREAMING_SNAKE_CASE : int = timeit(
            f"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=__lowerCamelCase, )
print(f"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 325
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 325
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
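# The pattern above degrades gracefully: when an optional dependency such as torch is
# missing, the model classes are simply left out of the import structure, and
# `_LazyModule` defers the real imports until an attribute is first accessed, keeping
# the top-level package import cheap.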
| 325
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ ={
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'mask2former'
__snake_case = ['swin']
__snake_case = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCamelCase = None , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 1_0_2_4 , __lowerCamelCase = "relu" , __lowerCamelCase = 6 , __lowerCamelCase = 1_0 , __lowerCamelCase = 8 , __lowerCamelCase = 0.0 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = 4 , __lowerCamelCase = 2_5_5 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.1 , __lowerCamelCase = 2.0 , __lowerCamelCase = 5.0 , __lowerCamelCase = 5.0 , __lowerCamelCase = 1_2_5_4_4 , __lowerCamelCase = 3.0 , __lowerCamelCase = 0.75 , __lowerCamelCase = 0.02 , __lowerCamelCase = 1.0 , __lowerCamelCase = True , __lowerCamelCase = [4, 8, 1_6, 3_2] , __lowerCamelCase = None , **__lowerCamelCase , ) -> Tuple:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
_SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAPPING["swin"](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = backbone_config.pop("model_type" )
_SCREAMING_SNAKE_CASE : str = CONFIG_MAPPING[backbone_model_type]
_SCREAMING_SNAKE_CASE : Dict = config_class.from_dict(__lowerCamelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {','.join(self.backbones_supported )}""" )
_SCREAMING_SNAKE_CASE : Dict = backbone_config
_SCREAMING_SNAKE_CASE : Optional[Any] = feature_size
_SCREAMING_SNAKE_CASE : str = mask_feature_size
_SCREAMING_SNAKE_CASE : str = hidden_dim
_SCREAMING_SNAKE_CASE : List[str] = encoder_feedforward_dim
_SCREAMING_SNAKE_CASE : Tuple = activation_function
_SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
_SCREAMING_SNAKE_CASE : Tuple = decoder_layers
_SCREAMING_SNAKE_CASE : Dict = num_attention_heads
_SCREAMING_SNAKE_CASE : str = dropout
_SCREAMING_SNAKE_CASE : str = dim_feedforward
_SCREAMING_SNAKE_CASE : int = pre_norm
_SCREAMING_SNAKE_CASE : int = enforce_input_projection
_SCREAMING_SNAKE_CASE : Optional[Any] = common_stride
_SCREAMING_SNAKE_CASE : List[str] = ignore_value
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_queries
_SCREAMING_SNAKE_CASE : List[str] = no_object_weight
_SCREAMING_SNAKE_CASE : List[str] = class_weight
_SCREAMING_SNAKE_CASE : List[Any] = mask_weight
_SCREAMING_SNAKE_CASE : List[Any] = dice_weight
_SCREAMING_SNAKE_CASE : Optional[Any] = train_num_points
_SCREAMING_SNAKE_CASE : List[str] = oversample_ratio
_SCREAMING_SNAKE_CASE : str = importance_sample_ratio
_SCREAMING_SNAKE_CASE : Union[str, Any] = init_std
_SCREAMING_SNAKE_CASE : List[Any] = init_xavier_std
_SCREAMING_SNAKE_CASE : Any = use_auxiliary_loss
_SCREAMING_SNAKE_CASE : Any = feature_strides
_SCREAMING_SNAKE_CASE : Dict = output_auxiliary_logits
_SCREAMING_SNAKE_CASE : str = decoder_layers
super().__init__(**__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> List[str]:
return cls(
backbone_config=__lowerCamelCase , **__lowerCamelCase , )
def UpperCamelCase_ ( self ) -> Dict[str, any]:
_SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.backbone_config.to_dict()
_SCREAMING_SNAKE_CASE : str = self.__class__.model_type
return output
| 325
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCamelCase__ =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ =[0, 25, 50]
UpperCamelCase__ =[25, 50, 75]
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ =np.ones(75)
UpperCamelCase__ =np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
UpperCamelCase__ =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCamelCase__ =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ =young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
UpperCamelCase__ =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
UpperCamelCase__ =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
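    # Worked example at a point x where young(x) = 0.6 and middle_aged(x) = 0.3:
    #   union            max(0.6, 0.3)         = 0.6
    #   intersection     min(0.6, 0.3)         = 0.3
    #   complement_a     1 - 0.6               = 0.4
    #   difference a/b   min(0.6, 1 - 0.3)     = 0.6
    #   alg_sum          0.6 + 0.3 - 0.6 * 0.3 = 0.72
    #   alg_product      0.6 * 0.3             = 0.18
    #   bdd_sum          min(1, 0.6 + 0.3)     = 0.9
    #   bdd_difference   max(0, 0.6 - 0.3)     = 0.3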
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 325
| 1
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = AutoencoderKL
__snake_case = 'sample'
__snake_case = 1E-2
@property
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = 4
_SCREAMING_SNAKE_CASE : str = 3
_SCREAMING_SNAKE_CASE : Union[str, Any] = (3_2, 3_2)
_SCREAMING_SNAKE_CASE : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
return {"sample": image}
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return (3, 3_2, 3_2)
@property
def UpperCamelCase_ ( self ) -> List[Any]:
return (3, 3_2, 3_2)
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
_SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
def UpperCamelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def UpperCamelCase_ ( self ) -> Dict:
# enable deterministic behavior for gradient checkpointing
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.prepare_init_args_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Any = self.model_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
_SCREAMING_SNAKE_CASE : Tuple = model(**__lowerCamelCase ).sample
        # run the backwards pass on the model. For simplicity, instead of a real loss
        # we backprop on the mean difference between the output and random labels
model.zero_grad()
_SCREAMING_SNAKE_CASE : List[str] = torch.randn_like(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_SCREAMING_SNAKE_CASE : Any = self.model_class(**__lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_SCREAMING_SNAKE_CASE : Optional[int] = model_a(**__lowerCamelCase ).sample
        # run the backwards pass on the model. For simplicity, instead of a real loss
        # we backprop on the mean difference between the output and random labels
model_a.zero_grad()
_SCREAMING_SNAKE_CASE : List[str] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = dict(model.named_parameters() )
_SCREAMING_SNAKE_CASE : Any = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model.to(__lowerCamelCase )
model.eval()
if torch_device == "mps":
_SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
else:
_SCREAMING_SNAKE_CASE : int = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_SCREAMING_SNAKE_CASE : Optional[Any] = image.to(__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , sample_posterior=__lowerCamelCase , generator=__lowerCamelCase ).sample
_SCREAMING_SNAKE_CASE : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_SCREAMING_SNAKE_CASE : int = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1E-2 ) )
@slow
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCamelCase ) for s in shape] )}.npy"""
def UpperCamelCase_ ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self , __lowerCamelCase=0 , __lowerCamelCase=(4, 3, 5_1_2, 5_1_2) , __lowerCamelCase=False ) -> Any:
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.floataa if fpaa else torch.floataa
_SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) ).to(__lowerCamelCase ).to(__lowerCamelCase )
return image
def UpperCamelCase_ ( self , __lowerCamelCase="CompVis/stable-diffusion-v1-4" , __lowerCamelCase=False ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = "fp16" if fpaa else None
_SCREAMING_SNAKE_CASE : List[str] = torch.floataa if fpaa else torch.floataa
_SCREAMING_SNAKE_CASE : Optional[int] = AutoencoderKL.from_pretrained(
__lowerCamelCase , subfolder="vae" , torch_dtype=__lowerCamelCase , revision=__lowerCamelCase , )
model.to(__lowerCamelCase ).eval()
return model
def UpperCamelCase_ ( self , __lowerCamelCase=0 ) -> Dict:
if torch_device == "mps":
return torch.manual_seed(__lowerCamelCase )
return torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[4_7, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : Dict = self.get_sd_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , generator=__lowerCamelCase , sample_posterior=__lowerCamelCase ).sample
assert sample.shape == image.shape
_SCREAMING_SNAKE_CASE : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[4_7, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(__lowerCamelCase , fpaa=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self.get_generator(__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase , generator=__lowerCamelCase , sample_posterior=__lowerCamelCase ).sample
assert sample.shape == image.shape
_SCREAMING_SNAKE_CASE : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : int = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[4_7, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_image(__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ).sample
assert sample.shape == image.shape
_SCREAMING_SNAKE_CASE : Optional[int] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[1_3, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[3_7, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(__lowerCamelCase , shape=(3, 4, 6_4, 6_4) )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Dict = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
_SCREAMING_SNAKE_CASE : List[str] = sample[-1, -2:, :2, -2:].flatten().cpu()
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[2_7, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[1_6, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model(fpaa=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_image(__lowerCamelCase , shape=(3, 4, 6_4, 6_4) , fpaa=__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
_SCREAMING_SNAKE_CASE : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : str = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=5E-3 )
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Dict = self.get_sd_vae_model(fpaa=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.get_sd_image(__lowerCamelCase , shape=(3, 4, 6_4, 6_4) , fpaa=__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model.decode(__lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : int = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1E-1 )
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(__lowerCamelCase , shape=(3, 4, 6_4, 6_4) )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model.decode(__lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[4_7, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model.encode(__lowerCamelCase ).latent_dist
_SCREAMING_SNAKE_CASE : Tuple = dist.sample(generator=__lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_SCREAMING_SNAKE_CASE : Optional[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
_SCREAMING_SNAKE_CASE : int = torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=__lowerCamelCase )
| 325
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
        _SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
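    # The loop above inverts the deterministic DDIM update: walking the timesteps in
    # reverse order, it predicts the noise for the current sample, strips the noise
    # direction at the previous (less noisy) alpha-bar to recover an x0 estimate, then
    # re-applies the predicted noise at the current alpha-bar. Iterated over all steps,
    # this maps an input image back to the latent noise that would generate it.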
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
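# The static method above is spherical linear interpolation (slerp):
#   theta = arccos(<x0, x1> / (||x0|| * ||x1||))
#   slerp(alpha; x0, x1) = sin((1 - alpha) * theta) / sin(theta) * x0
#                        + sin(alpha * theta) / sin(theta) * x1
# alpha = 0 returns the first tensor, alpha = 1 the second, and intermediate values
# follow the great-circle arc between the two flattened tensors.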
| 325
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 325
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
        for perpendicular in range(base, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
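# For example, perimeter 120 admits exactly three integer right triangles:
# (20, 48, 52), (24, 45, 51) and (30, 40, 50). Over all perimeters up to 1000,
# 840 is expected to have the most solutions (Project Euler problem 39).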
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 1
|
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCamelCase__ ='src/transformers'
# Matches is_xxx_available()
UpperCamelCase__ =re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
UpperCamelCase__ =re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCamelCase__ =re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
UpperCamelCase__ =re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
UpperCamelCase__ =re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCamelCase__ =re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCamelCase__ =re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCamelCase__ =re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
UpperCamelCase__ =re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
UpperCamelCase__ =re.compile(R'^\s*try:')
# Catches a line with else:
UpperCamelCase__ =re.compile(R'^\s*else:')
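# For illustration, the kinds of lines the patterns above are written to match
# (hypothetical module and object names):
#   _re_backend:                "if not is_torch_available():"  -> backend "torch"
#   _re_import_struct_add_one:  '_import_structure["models.foo"].append("FooModel")'
#   _re_import_struct_add_many: '_import_structure["models.foo"].extend(["FooModel", "FooConfig"])'
#   _re_quote_object:           '    "FooModel",'
#   _re_import:                 "    from .modeling_foo import FooModel, FooConfig"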
def lowerCamelCase__ (__lowerCamelCase ):
if _re_test_backend.search(__lowerCamelCase ) is None:
return None
_SCREAMING_SNAKE_CASE : Dict = [b[0] for b in _re_backend.findall(__lowerCamelCase )]
backends.sort()
return "_and_".join(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines()
_SCREAMING_SNAKE_CASE : str = 0
while line_index < len(__lowerCamelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_SCREAMING_SNAKE_CASE : Optional[int] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_SCREAMING_SNAKE_CASE : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = _re_one_line_import_struct.search(__lowerCamelCase ).groups()[0]
            _SCREAMING_SNAKE_CASE : Optional[Any] = re.findall(R"\[([^\]]+)\]", __lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_SCREAMING_SNAKE_CASE : List[Any] = _re_import_struct_key_value.search(__lowerCamelCase )
if single_line_import_search is not None:
            _SCREAMING_SNAKE_CASE : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
objects.extend(__lowerCamelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_SCREAMING_SNAKE_CASE : List[Any] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_SCREAMING_SNAKE_CASE : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_SCREAMING_SNAKE_CASE : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_SCREAMING_SNAKE_CASE : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_SCREAMING_SNAKE_CASE : Optional[Any] = lines[line_index]
if _re_import_struct_add_one.search(__lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__lowerCamelCase ) is not None:
_SCREAMING_SNAKE_CASE : List[Any] = _re_import_struct_add_many.search(__lowerCamelCase ).groups()[0].split(", " )
                    _SCREAMING_SNAKE_CASE : int = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(__lowerCamelCase )
elif _re_between_brackets.search(__lowerCamelCase ) is not None:
_SCREAMING_SNAKE_CASE : str = _re_between_brackets.search(__lowerCamelCase ).groups()[0].split(", " )
                    _SCREAMING_SNAKE_CASE : int = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(__lowerCamelCase )
elif _re_quote_object.search(__lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(__lowerCamelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_SCREAMING_SNAKE_CASE : Any = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_SCREAMING_SNAKE_CASE : Optional[Any] = []
while (
line_index < len(__lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_SCREAMING_SNAKE_CASE : Any = lines[line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = _re_import.search(__lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_SCREAMING_SNAKE_CASE : Tuple = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_SCREAMING_SNAKE_CASE : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_SCREAMING_SNAKE_CASE : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_SCREAMING_SNAKE_CASE : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_SCREAMING_SNAKE_CASE : List[Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : int = _re_import.search(__lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_SCREAMING_SNAKE_CASE : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
def find_duplicates(__lowerCamelCase ):
return [k for k, v in collections.Counter(__lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_SCREAMING_SNAKE_CASE : Optional[int] = []
for key in import_dict_objects.keys():
_SCREAMING_SNAKE_CASE : Tuple = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_SCREAMING_SNAKE_CASE : str = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_SCREAMING_SNAKE_CASE : List[Any] = "base imports" if key == "none" else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : List[Any] = []
for root, _, files in os.walk(__lowerCamelCase ):
if "__init__.py" in files:
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase, "__init__.py" )
_SCREAMING_SNAKE_CASE : List[str] = parse_init(__lowerCamelCase )
if objects is not None:
_SCREAMING_SNAKE_CASE : Dict = analyze_results(*__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(__lowerCamelCase ) )
if len(__lowerCamelCase ) > 0:
raise ValueError("\n\n".join(__lowerCamelCase ) )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : int = []
for path, directories, files in os.walk(__lowerCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__lowerCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__lowerCamelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
_SCREAMING_SNAKE_CASE : Any = str((Path(__lowerCamelCase ) / folder).relative_to(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = short_path.replace(os.path.sep, "." )
submodules.append(__lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
_SCREAMING_SNAKE_CASE : Union[str, Any] = str((Path(__lowerCamelCase ) / fname).relative_to(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : List[str] = short_path.replace(".py", "" ).replace(os.path.sep, "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__lowerCamelCase )
return submodules
UpperCamelCase__ =[
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def lowerCamelCase__ ():
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE : Dict = importlib.util.spec_from_file_location(
"transformers", os.path.join(__lowerCamelCase, "__init__.py" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
_SCREAMING_SNAKE_CASE : List[str] = spec.loader.load_module()
_SCREAMING_SNAKE_CASE : List[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Any = "\n".join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
f"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# ===== initialization =====
_SCREAMING_SNAKE_CASE : List[Any] = Mock()
_SCREAMING_SNAKE_CASE : Optional[Any] = conn, Mock()
_SCREAMING_SNAKE_CASE : Dict = iter([1, None] )
_SCREAMING_SNAKE_CASE : Optional[Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
# ===== invoke =====
send_file(filename="mytext.txt", testing=__lowerCamelCase )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 325
| 1
|
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ), end="\t" )
else:
print("INF", end="\t" )
print()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = [[float("inf" ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_SCREAMING_SNAKE_CASE : int = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase, __lowerCamelCase )
return dist, v
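# The triple loop above implements the Floyd-Warshall recurrence
#   dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
# for every intermediate vertex k: after the k-th pass, dist[i][j] holds the shortest
# i -> j distance using only intermediate vertices {0, ..., k}. Total cost is O(v^3)
# time and O(v^2) space.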
if __name__ == "__main__":
UpperCamelCase__ =int(input('Enter number of vertices: '))
UpperCamelCase__ =int(input('Enter number of edges: '))
UpperCamelCase__ =[[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
UpperCamelCase__ =0.0
    # src and dst are vertex indices that must lie in [0, v); graph is a v x v
    # matrix, so an out-of-range index will raise an error
for i in range(e):
print('\nEdge ', i + 1)
UpperCamelCase__ =int(input('Enter source:'))
UpperCamelCase__ =int(input('Enter destination:'))
UpperCamelCase__ =float(input('Enter weight:'))
UpperCamelCase__ =weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 325
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
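# Usage sketch (an assumption about the intended API; names are illustrative):
# the processor wraps a BLIP image processor, a language-model tokenizer and
# a Q-Former tokenizer, so a single call like
#   batch = processor(images=pil_image, text="a prompt", return_tensors="pt")
# is expected to return pixel values, the language-model input ids and
# attention mask, and the separately tokenized Q-Former ids popped out above.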
| 325
| 1
|
import qiskit
def lowerCamelCase__ (__lowerCamelCase = 2 ):
_SCREAMING_SNAKE_CASE : Tuple = qubits
# Using Aer's simulator
_SCREAMING_SNAKE_CASE : Dict = qiskit.Aer.get_backend("aer_simulator" )
# Creating a Quantum Circuit acting on the q register
_SCREAMING_SNAKE_CASE : Optional[int] = qiskit.QuantumCircuit(__lowerCamelCase, __lowerCamelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1, __lowerCamelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1, __lowerCamelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowerCamelCase ) ), list(range(__lowerCamelCase ) ) )
# Now measuring any one qubit would cause the other qubits to collapse
# out of their superposition and take on the same state as the measured one.
# Executing the circuit on the simulator
_SCREAMING_SNAKE_CASE : Tuple = qiskit.execute(__lowerCamelCase, __lowerCamelCase, shots=1000 )
return job.result().get_counts(__lowerCamelCase )
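# For the default 2 qubits this prepares a GHZ-style state
# (|00> + |11>)/sqrt(2), so over 1000 shots the returned counts
# concentrate (up to sampling noise) on the all-zeros and all-ones
# bitstrings, e.g. roughly {'00': ~500, '11': ~500}.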
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}")
| 325
|
from maths.prime_check import is_prime
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
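# Illustrative behaviour: for 5 the function returns 7 (5 and 7 are twin
# primes), while for 4 it returns -1 because 4 is not prime.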
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 1
|
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = str(bin(__lowerCamelCase ) )
binary_number += "0" * shift_amount
return binary_number
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
_SCREAMING_SNAKE_CASE : str = str(bin(__lowerCamelCase ) )[2:]
if shift_amount >= len(__lowerCamelCase ):
return "0b0"
_SCREAMING_SNAKE_CASE : Tuple = binary_number[: len(__lowerCamelCase ) - shift_amount]
return "0b" + shifted_binary_number
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if number >= 0: # Get binary representation of positive number
_SCREAMING_SNAKE_CASE : Optional[Any] = "0" + str(bin(__lowerCamelCase ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
_SCREAMING_SNAKE_CASE : Tuple = len(bin(__lowerCamelCase )[3:] ) # Find 2's complement of number
_SCREAMING_SNAKE_CASE : int = bin(abs(__lowerCamelCase ) - (1 << binary_number_length) )[3:]
_SCREAMING_SNAKE_CASE : Any = (
"1" + "0" * (binary_number_length - len(__lowerCamelCase )) + binary_number
)
if shift_amount >= len(__lowerCamelCase ):
return "0b" + binary_number[0] * len(__lowerCamelCase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__lowerCamelCase ) - shift_amount]
)
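# Worked example of the 2's complement path above: for number = -8 and
# shift_amount = 1, binary_number becomes '11000' (sign bit prepended),
# and the arithmetic right shift replicates the sign bit to give
# '0b11100', i.e. -4 in 5-bit 2's complement.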
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ (__lowerCamelCase ):
return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=__lowerCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=__lowerCamelCase )
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = model
_SCREAMING_SNAKE_CASE : Optional[int] = cache
_SCREAMING_SNAKE_CASE : str = force
_SCREAMING_SNAKE_CASE : str = trust_remote_code
def UpperCamelCase_ ( self ) -> Optional[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
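# Example invocation (illustrative), assuming this command is registered
# under the transformers CLI:
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models
# which pre-downloads both the model weights and the matching tokenizer.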
| 325
| 1
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'WhisperFeatureExtractor'
__snake_case = 'WhisperTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> int:
super().__init__(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extractor
_SCREAMING_SNAKE_CASE : int = False
def UpperCamelCase_ ( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True ) -> List[Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCamelCase , language=__lowerCamelCase , no_timestamps=__lowerCamelCase )
def __call__( self , *__lowerCamelCase , **__lowerCamelCase ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = kwargs.pop("audio" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = kwargs.pop("sampling_rate" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = kwargs.pop("text" , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = args[0]
_SCREAMING_SNAKE_CASE : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_SCREAMING_SNAKE_CASE : List[str] = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = encodings["input_ids"]
return inputs
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Any:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Dict:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase="np" ) -> Optional[int]:
return self.tokenizer.get_prompt_ids(__lowerCamelCase , return_tensors=__lowerCamelCase )
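# Usage sketch (an assumption about the intended behaviour): calling the
# processor with audio (plus sampling_rate) returns feature-extractor
# inputs, calling it with text returns tokenizer encodings, and with both
# the tokenized ids are attached to the audio features before returning.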
| 325
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 325
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
' It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__snake_case = 'CIDAS/clipseg-rd64-refined'
__snake_case = 'image_segmenter'
__snake_case = CLIPSegForImageSegmentation
__snake_case = ['image', 'text']
__snake_case = ['image']
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> List[Any]:
requires_backends(self , ["vision"] )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
return self.pre_processor(text=[label] , images=[image] , padding=__lowerCamelCase , return_tensors="pt" )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> int:
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model(**__lowerCamelCase ).logits
return logits
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : int = outputs.cpu().detach().numpy()
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : int = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
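# Note: the obfuscated assignments above correspond (assumption) to
# thresholding the logits at zero into a binary {0, 1} mask, which is
# then scaled by 255 and returned as a PIL image.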
| 325
|
from math import isqrt, loga
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = False
return [i for i in range(2, __lowerCamelCase ) if is_prime[i]]
def lowerCamelCase__ (__lowerCamelCase = 800800, __lowerCamelCase = 800800 ):
_SCREAMING_SNAKE_CASE : Optional[int] = degree * loga(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = int(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = calculate_prime_numbers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Dict = len(__lowerCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
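# The inequality p**q * q**p < base**degree is checked in log space:
# it holds exactly when q*log2(p) + p*log2(q) < degree*log2(base),
# which is the upper_bound computed above; this avoids huge integers.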
if __name__ == "__main__":
print(f"{solution() = }")
| 325
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ =[
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def lowerCamelCase__ (__lowerCamelCase ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_SCREAMING_SNAKE_CASE : str = k.replace(__lowerCamelCase, __lowerCamelCase )
if k.startswith("encoder" ):
_SCREAMING_SNAKE_CASE : List[str] = k.replace(".attn", ".self_attn" )
_SCREAMING_SNAKE_CASE : List[str] = k.replace("norm1", "self_attn_layer_norm" )
_SCREAMING_SNAKE_CASE : Optional[int] = k.replace("norm2", "final_layer_norm" )
elif k.startswith("decoder" ):
_SCREAMING_SNAKE_CASE : int = k.replace("norm1", "self_attn_layer_norm" )
_SCREAMING_SNAKE_CASE : Dict = k.replace("norm2", "encoder_attn_layer_norm" )
_SCREAMING_SNAKE_CASE : str = k.replace("norm3", "final_layer_norm" )
return k
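# Illustrative mapping (derived from PATTERNS above): a ParlAI key such as
# 'encoder.attention.q_lin.weight' is rewritten step by step into the
# Hugging Face key 'encoder.self_attn.q_proj.weight'.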
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
_SCREAMING_SNAKE_CASE : Tuple = sd.pop(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = k.replace("layernorm_embedding", "layer_norm" )
assert new_k not in sd
_SCREAMING_SNAKE_CASE : Optional[int] = v
UpperCamelCase__ =['START']
@torch.no_grad()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = torch.load(__lowerCamelCase, map_location="cpu" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model["model"]
_SCREAMING_SNAKE_CASE : Optional[Any] = BlenderbotConfig.from_json_file(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = BlenderbotForConditionalGeneration(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = m.model.state_dict().keys()
_SCREAMING_SNAKE_CASE : List[str] = []
_SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_SCREAMING_SNAKE_CASE : Optional[int] = rename_state_dict_key(__lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_SCREAMING_SNAKE_CASE : List[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCamelCase )
m.model.load_state_dict(__lowerCamelCase, strict=__lowerCamelCase )
m.half()
m.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
UpperCamelCase__ =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 325
|
from math import factorial
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# If either of the conditions is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(__lowerCamelCase ) // (factorial(__lowerCamelCase ) * factorial(n - k ))
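# This computes "n choose k": C(n, k) = n! / (k! * (n - k)!); for example
# C(52, 5) = 2598960, the number of five-card hands printed below.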
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
| 325
| 1
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCamelCase__ ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[str] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , repo_id="test-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : int = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def UpperCamelCase_ ( self ) -> str:
CustomConfig.register_for_auto_class()
_SCREAMING_SNAKE_CASE : List[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 4_2 )
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_SCREAMING_SNAKE_CASE : Dict = c.n_embd + 1 # int
_SCREAMING_SNAKE_CASE : int = c.resid_pdrop + 1.0 # float
_SCREAMING_SNAKE_CASE : Tuple = not c.scale_attn_weights # bool
_SCREAMING_SNAKE_CASE : Optional[Any] = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(__lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(__lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(__lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(__lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = PretrainedConfig()
_SCREAMING_SNAKE_CASE : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [key for key, value in config_common_kwargs.items() if value == getattr(__lowerCamelCase , __lowerCamelCase )]
if len(__lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(__lowerCamelCase )}.""" )
def UpperCamelCase_ ( self ) -> str:
with self.assertRaises(__lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
_SCREAMING_SNAKE_CASE : Any = mock.Mock()
_SCREAMING_SNAKE_CASE : Optional[int] = 5_0_0
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = HTTPError
_SCREAMING_SNAKE_CASE : Optional[int] = {}
# Download this model to make sure it's in the cache.
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head:
_SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained("bert-base-cased" )
_SCREAMING_SNAKE_CASE : Dict = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(__lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_SCREAMING_SNAKE_CASE : str = ["config.42.0.0.json"]
_SCREAMING_SNAKE_CASE : Any = 7_6_8
configuration.save_pretrained(__lowerCamelCase )
shutil.move(os.path.join(__lowerCamelCase , "config.4.0.0.json" ) , os.path.join(__lowerCamelCase , "config.42.0.0.json" ) )
_SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_SCREAMING_SNAKE_CASE : str = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_SCREAMING_SNAKE_CASE : Optional[int] = "v4.0.0"
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
__lowerCamelCase , return_unused_kwargs=__lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__lowerCamelCase , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_SCREAMING_SNAKE_CASE : Optional[Any] = "v3.0.0"
_SCREAMING_SNAKE_CASE : Tuple = old_transformers.models.auto.AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 325
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
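# Note: the class above is an RMSNorm-style layer norm: it computes
# variance = mean(x**2) over the last dimension and returns
# weight * x / sqrt(variance + eps), with no mean subtraction and no bias.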
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
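# The forward above is the tanh approximation of GELU:
# gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).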
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
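# This is a FiLM (feature-wise linear modulation) layer: the conditioning
# embedding is projected to a (scale, shift) pair and the input is
# modulated as x * (1 + scale) + shift, matching the chunk above.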
| 325
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
| 325
|
def binomial_coefficient(n, r):
    """Compute C(n, r) via a space-optimized Pascal's triangle row update."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # update the current row from the previous row, right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
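# Added sanity check (not part of the original snippet): the row-update result
# should agree with math.comb from the standard library.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252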
| 325
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ =pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"] )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
inspect_dataset(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = path + ".py"
assert script_name in os.listdir(__lowerCamelCase )
assert "__pycache__" not in os.listdir(__lowerCamelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path", ["accuracy"] )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
inspect_metric(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = path + ".py"
assert script_name in os.listdir(__lowerCamelCase )
assert "__pycache__" not in os.listdir(__lowerCamelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits", [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
], )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = get_dataset_config_info(__lowerCamelCase, config_name=__lowerCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception", [
("paws", None, ValueError),
], )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
with pytest.raises(__lowerCamelCase ):
get_dataset_config_info(__lowerCamelCase, config_name=__lowerCamelCase )
@pytest.mark.parametrize(
"path, expected", [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
], )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = get_dataset_config_names(__lowerCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config", [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
], )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = get_dataset_infos(__lowerCamelCase )
assert list(infos.keys() ) == expected_configs
_SCREAMING_SNAKE_CASE : Any = expected_configs[0]
assert expected_config in infos
_SCREAMING_SNAKE_CASE : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits", [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
], )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = get_dataset_infos(__lowerCamelCase )
assert expected_config in infos
_SCREAMING_SNAKE_CASE : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception", [
("paws", None, ValueError),
], )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
with pytest.raises(__lowerCamelCase ):
get_dataset_split_names(__lowerCamelCase, config_name=__lowerCamelCase )
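# Direct usage of the APIs exercised above (illustrative; network access is
# assumed, and the expected values mirror the parametrized cases):
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']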
| 325
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
    def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.float32 ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
            _SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
        _SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.int64 )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
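# Shape sketch for the retrieve() method above (illustrative numbers, not part of
# the original file): with world_size = 2 and per-rank question_hidden_states of
# shape (n_queries, d), rank 0 gathers two (n_queries, d) tensors, runs
# _main_retrieve on their concatenation, then scatters back (n_queries, n_docs)
# int64 doc ids and (n_queries, n_docs, d) float32 doc embeddings, one chunk per rank.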
| 325
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
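# Minimal self-contained sketch of the lazy-import pattern used above (this demo
# class and the {"json": ["loads"]} structure are illustrative assumptions, not
# the transformers implementation):
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    """Resolve attributes on first access instead of importing everything upfront."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, exported in self._import_structure.items():
            if attr in exported:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)


_lazy_demo = _DemoLazyModule("demo", {"json": ["loads"]})
assert _lazy_demo.loads('{"a": 1}') == {"a": 1}  # json is imported only here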
| 325
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'timesformer'
def __init__( self , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=8 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-6 , __lowerCamelCase=True , __lowerCamelCase="divided_space_time" , __lowerCamelCase=0 , **__lowerCamelCase , ) -> List[str]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : str = num_frames
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
_SCREAMING_SNAKE_CASE : Tuple = attention_type
_SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate
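# Usage sketch for the config above (hedged: the upstream class name is
# TimesformerConfig, and the values shown are the defaults from the signature):
#   from transformers import TimesformerConfig
#   config = TimesformerConfig(num_frames=8, attention_type="divided_space_time")
#   config.hidden_size  # 768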
| 325
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ ={'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 325
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'spiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCamelCase__ ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCamelCase__ ='▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
if self.remove_space:
_SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs
_SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[str] = ""
_SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
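# Usage sketch (hedged: upstream names this class AlbertTokenizer; fetching
# `spiece.model` from the hub is assumed):
#   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#   tok.preprocess_text("``quoted'' text")  # -> '"quoted" text'
#   ids = tok.build_inputs_with_special_tokens(tok.convert_tokens_to_ids(tok.tokenize("hello")))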
| 325
| 1
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TOKENIZER_CLASSES
else:
_SCREAMING_SNAKE_CASE : str = {tokenizer_name: getattr(__lowerCamelCase, tokenizer_name + "Fast" )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_SCREAMING_SNAKE_CASE : Optional[int] = TOKENIZER_CLASSES[tokenizer_name]
_SCREAMING_SNAKE_CASE : str = True
if checkpoint_name is None:
_SCREAMING_SNAKE_CASE : Optional[int] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_SCREAMING_SNAKE_CASE : int = [checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_SCREAMING_SNAKE_CASE : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase, force_download=__lowerCamelCase )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = checkpoint.split("/" )
_SCREAMING_SNAKE_CASE : int = os.path.join(__lowerCamelCase, __lowerCamelCase )
elif add_prefix:
_SCREAMING_SNAKE_CASE : List[Any] = checkpoint
_SCREAMING_SNAKE_CASE : Optional[Any] = dump_path
else:
_SCREAMING_SNAKE_CASE : Dict = None
_SCREAMING_SNAKE_CASE : int = dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_SCREAMING_SNAKE_CASE : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_SCREAMING_SNAKE_CASE : Dict = file_path.split(__lowerCamelCase )[-1][0]
if next_char == "/":
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.save_pretrained(
__lowerCamelCase, legacy_format=__lowerCamelCase, filename_prefix=__lowerCamelCase )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(__lowerCamelCase )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
UpperCamelCase__ =parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
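# Example invocation (illustrative; the script file name is an assumption, but the
# flags match the argparse definitions above):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers/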
| 325
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
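# Migration sketch for the deprecation above:
#   from transformers import SegformerImageProcessor
#   image_processor = SegformerImageProcessor()  # drop-in replacement for the feature extractor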
| 325
| 1
|
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
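# Added check (not in the original snippet): the classic n = 1000 instance has the
# unique triplet (200, 375, 425).
assert solution(1000) == 200 * 375 * 425 == 31875000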
if __name__ == "__main__":
print(f"{solution() = }")
| 325
|
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 325
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , __lowerCamelCase = False ) -> Dict:
_SCREAMING_SNAKE_CASE : Any = scheduler
_SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(__lowerCamelCase , (list, tuple) ) else [optimizers]
_SCREAMING_SNAKE_CASE : List[Any] = split_batches
_SCREAMING_SNAKE_CASE : List[str] = step_with_optimizer
_SCREAMING_SNAKE_CASE : Union[str, Any] = GradientState()
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> List[str]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_SCREAMING_SNAKE_CASE : Dict = AcceleratorState().num_processes
for _ in range(__lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase )
else:
self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
return self.scheduler.get_last_lr()
def UpperCamelCase_ ( self ) -> Optional[int]:
return self.scheduler.state_dict()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
self.scheduler.load_state_dict(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
return self.scheduler.get_lr()
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
return self.scheduler.print_lr(*__lowerCamelCase , **__lowerCamelCase )
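# Usage sketch (hedged: upstream names this wrapper AcceleratedScheduler, and
# Accelerator.prepare normally constructs it for you):
#   import torch
#   opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#   wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=True)
#   opt.step(); wrapped.step()  # the scheduler only advances when gradients are synced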
| 325
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
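# Added check (not in the original snippet): for the eight leaves above, the
# maximizer-to-move game value is 65.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65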
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 325
| 1
|
from __future__ import annotations
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase = 0 ) -> int:
_SCREAMING_SNAKE_CASE : int = key
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> list[str]:
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(__lowerCamelCase ) ^ key ) for ch in content]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> list[str]:
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(__lowerCamelCase ) ^ key ) for ch in content]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 0 ) -> str:
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
_SCREAMING_SNAKE_CASE : Tuple = ""
for ch in content:
ans += chr(ord(__lowerCamelCase ) ^ key )
return ans
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 0 ) -> str:
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
_SCREAMING_SNAKE_CASE : int = ""
for ch in content:
ans += chr(ord(__lowerCamelCase ) ^ key )
return ans
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 0 ) -> bool:
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
try:
with open(__lowerCamelCase ) as fin, open("encrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__lowerCamelCase , __lowerCamelCase ) )
except OSError:
return False
return True
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> bool:
assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
try:
with open(__lowerCamelCase ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(__lowerCamelCase , __lowerCamelCase ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 325
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
    _SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
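# Illustrative marker the checker above looks for (the object names are made up):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# `_re_copy_warning` extracts the source object, `_re_replace_pattern` parses each
# `old->new` entry, and the `all-casing` option also applies lower/upper variants.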
| 325
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ =torch.device('cpu')
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
def lowerCamelCase__ (__lowerCamelCase ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = dct.pop(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = val
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = []
for k in state_dict.keys():
_SCREAMING_SNAKE_CASE : Optional[Any] = k
if ".pwconv" in k:
_SCREAMING_SNAKE_CASE : List[str] = k_new.replace(".pwconv", ".point_wise_conv" )
if ".dwconv" in k:
_SCREAMING_SNAKE_CASE : Any = k_new.replace(".dwconv", ".depth_wise_conv" )
if ".Proj." in k:
_SCREAMING_SNAKE_CASE : Tuple = k_new.replace(".Proj.", ".proj." )
if "patch_embed" in k_new:
_SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
_SCREAMING_SNAKE_CASE : Optional[Any] = k_new.split("." )
if ls[2].isdigit():
_SCREAMING_SNAKE_CASE : Optional[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
_SCREAMING_SNAKE_CASE : Tuple = k_new.replace("network", "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
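
# Example invocation (script filename and local checkpoint path are illustrative;
# --original_ckpt also accepts an https URL):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth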
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year\'s competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
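

# Quick sanity check (25 = 0b11001, which has three set bits):
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3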
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
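

# For example, pythagorean_triple(12) finds only the (3, 4, 5) triangle, so it
# returns Counter({12: 1}); (6, 8, 10) has perimeter 24 and is filtered out.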
def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
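
# With the lazy pattern above, importing the package is cheap: the heavy
# modeling_swiftformer module is only imported the first time one of its
# exported names (e.g. SwiftFormerForImageClassification) is actually accessed.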
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
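

# Example behaviour (a local path passes through unchanged):
#   extract_path_from_uri("s3://bucket/dataset") -> "bucket/dataset"
#   extract_path_from_uri("/tmp/dataset") -> "/tmp/dataset"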
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
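
    # Each result above is a length-75 membership vector over X; for example,
    # union[i] == max(young[i], middle_aged[i]) pointwise.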
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
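

# Minimal usage sketch (add_edge returns self, so calls can be chained):
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(0, 1).add_edge(1, 2)
    print(graph)  # {0: [1], 1: [0, 2], 2: [1]}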
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM can produce good samples in far fewer steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNet2DConditionModel):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # reversing images back to noise requires the deterministic DDIM scheduler
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        # spherical linear interpolation between two flattened tensors
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
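
# Usage sketch (tensor names are illustrative): slerp can blend two noise seeds
# before generation, e.g. noise = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5).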
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
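

# Loading sketch (checkpoint name illustrative): this module is the text encoder
# used by AltDiffusion-style pipelines, e.g.
#   RobertaSeriesModelWithTransformation.from_pretrained("BAAI/AltDiffusion", subfolder="text_encoder")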
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== assertions =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient is the gradient of the ellipse's normal at the point of
    # incidence; with tan(theta) = normal_gradient, s2 and c2 are sin(2*theta)
    # and cos(2*theta), which rotate the incoming beam into the reflected one.
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f"{solution() = }")
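
# solution() fires the beam from (0.0, 10.1) toward (1.4, -9.6) and counts
# reflections until it escapes through the small gap at the top of the ellipse
# (|x| <= 0.01 with y > 0).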
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
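

# Typical use (checkpoint name illustrative):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")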
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": simple_accuracy(preds, p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt" )
if trainer.is_world_master():
            with open(output_eval_file, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s", __lowerCamelCase, __lowerCamelCase )
writer.write("%s = %s\n" % (key, value) )
results.update(__lowerCamelCase )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
from maths.prime_check import is_prime
def lowerCamelCase__ (number ):
    if not isinstance(number, int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
return number + 2
else:
return -1
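# Illustrative behaviour (sketch, assuming maths.prime_check.is_prime is the usual
# primality test): for 5 the function returns 7 since (5, 7) are twin primes,
# while for 4 it returns -1 because 4 is not prime.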
if __name__ == "__main__":
import doctest
doctest.testmod()
from manim import *
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE : Dict = Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE : List[str] = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Any = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Optional[int] = Text("CPU" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : List[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE : Dict = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Optional[int] = Text("GPU" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : List[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Dict = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : List[str] = Text("Model" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : Dict = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = []
_SCREAMING_SNAKE_CASE : int = []
_SCREAMING_SNAKE_CASE : Tuple = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
model_cpu_arr.append(__lowerCamelCase )
self.add(*__lowerCamelCase , *__lowerCamelCase , *__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Any = Text("Loaded Checkpoint" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : Any = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : List[str] = []
for i, rect in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
ckpt_arr.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCamelCase )
self.add(*__lowerCamelCase , *__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE : List[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE : int = [meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : List[Any] = [meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : int = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : str = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Tuple = Text("Disk" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : Dict = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowerCamelCase , run_time=3 ) , Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, rect in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(FadeOut(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , *__lowerCamelCase ) , )
self.wait()
| 325
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ (args ):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def UpperCamelCase_ ( parser ) -> str:
        download_parser = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=__lowerCamelCase , help="Name of the model to download" )
        download_parser.set_defaults(func=lowerCamelCase__ )
    def __init__( self , model , cache , force , trust_remote_code ) -> List[Any]:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
def UpperCamelCase_ ( self ) -> Optional[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
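# Usage sketch (hypothetical invocation): this class backs the `transformers-cli download`
# subcommand, e.g. `transformers-cli download gpt2 --cache-dir /tmp/models --force`,
# which simply pre-populates the local cache via the two from_pretrained calls above.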
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
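# The module below follows the standard transformers lazy-import layout: _import_structure
# maps submodule names to their exported symbols, the TYPE_CHECKING branch gives static
# analyzers real imports, and at runtime the module object is replaced by a _LazyModule
# that only imports a submodule on first attribute access.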
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
'''simple docstring'''
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ) -> Tuple:
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
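# Note on the helper above: padding positions are masked out of `attention_mask`, while
# `decoder_attention_mask` hard-codes position 0 to 1 so the decoder start token is
# always attended to regardless of the pad token id.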
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ) -> Optional[int]:
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
    def test_config( self ) -> Tuple:
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'
@cached_property
    def tokenizer( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
    def model( self ) -> str:
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
    def test_90_generation_from_short_input( self ) -> Tuple:
        model_inputs = self.tokenizer(self.src_text , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
'''simple docstring'''
    def __init__( self , module , rank ) -> str:
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self , input , *args , **kwargs ) -> List[Any]:
        return self.module(input , *args , **kwargs ) + self.adapter(input )
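# Sketch of the idea behind LoRALayer (comment added for clarity): the frozen base
# weight W is kept intact and a low-rank update is learned on the side, so the layer
# computes W x + B(A x) with A: in_features -> rank and B: rank -> out_features,
# B initialized to zero so training starts from the base model's behaviour.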
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest( unittest.TestCase ):
'''simple docstring'''
    model_name = 'bigscience/bloom-1b7'
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574
    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 1_0
    def setUp( self ) -> List[Any]:
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest( Base4bitTest ):
'''simple docstring'''
    def setUp( self ) -> int:
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.float16 , device_map="auto" )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
    def tearDown( self ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
    def test_quantization_config_json_serialization( self ) -> Dict:
        config = self.model_abit.config
        self.assertTrue(hasattr(config , "quantization_config" ) )
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint( self ) -> Optional[Any]:
        from bitsandbytes.nn import Params4bit
        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Params4bit )
    def test_linear_are_4bit( self ) -> Dict:
        from transformers import T5PreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(module , torch.nn.Linear ):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8 )
    def test_generate_quality( self ) -> Any:
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def test_generate_quality_config( self ) -> Tuple:
        config = BitsAndBytesConfig()
        config.load_in_4bit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=config , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def test_raise_on_save_pretrained( self ) -> Union[str, Any]:
        with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname )
    def test_raise_if_config_and_load_in_4bit( self ) -> Optional[Any]:
        config = BitsAndBytesConfig()
        with self.assertRaises(ValueError ):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=config , load_in_4bit=True , device_map="auto" , bnb_4bit_quant_type="nf4" , )
    def test_device_and_dtype_assignment( self ) -> str:
        with self.assertRaises(ValueError ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(ValueError ):
            # Tries with a `dtype`
            self.model_abit.to(torch.float16 )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(ValueError ):
            # Tries with a cast
            self.model_abit.float()
        with self.assertRaises(ValueError ):
            # Tries with a cast
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        self.model_fpaa = self.model_fpaa.to(torch.float32 )
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()
    def test_fp32_4bit_conversion( self ) -> int:
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small" , load_in_4bit=True , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32 )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ) -> Optional[Any]:
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = "Translate in German: Hello, my dog is cute"
    def tearDown( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32( self ) -> Union[str, Any]:
        from transformers import T5ForConditionalGeneration
        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_4bit=True , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32( self ) -> str:
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linear4bit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_4bit=True , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
class Bnb4BitModelClassesTest( Base4bitTest ):
'''simple docstring'''
    def setUp( self ) -> Dict:
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_4bit=True , device_map="auto" )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_4bit=True , device_map="auto" )
    def tearDown( self ) -> str:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> List[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest( Base4bitTest ):
'''simple docstring'''
    def setUp( self ) -> int:
super().setUp()
    def tearDown( self ) -> Optional[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
    def test_pipeline( self ) -> str:
        self.pipe = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu( Base4bitTest ):
'''simple docstring'''
    def setUp( self ) -> Optional[int]:
super().setUp()
    def test_multi_gpu_loading( self ) -> int:
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_4bit=True , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining( Base4bitTest ):
'''simple docstring'''
    def setUp( self ) -> Optional[Any]:
        self.model_name = "facebook/opt-350m"
        super().setUp()
    def test_training( self ) -> int:
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32 )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=1_6 )
                module.k_proj = LoRALayer(module.k_proj , rank=1_6 )
                module.v_proj = LoRALayer(module.v_proj , rank=1_6 )
        # Step 3: dummy batch
        batch = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test( Bnb4BitTest ):
'''simple docstring'''
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
from math import isqrt, log2
def calculate_prime_numbers(max_number ):
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution(base = 800800, degree = 800800 ):
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
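# Why logarithms (sketch): a "hybrid integer" p**q * q**p stays within the bound
# base**degree exactly when q*log2(p) + p*log2(q) <= degree*log2(base), so the
# two-pointer loop above can compare cheap floats instead of astronomically large ints.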
if __name__ == "__main__":
print(f"{solution() = }")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
'''simple docstring'''
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ) -> Dict:
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ) -> Tuple:
        return self.model.generate(
            inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ) -> Tuple:
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        sequence = re.sub(r"<.*?>" , "" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
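# Minimal usage sketch (hypothetical image path and question):
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total?")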
from math import factorial
def combinations(n, k ):
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
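# This computes the binomial coefficient C(n, k) = n! / (k! * (n - k)!);
# for example C(5, 2) = 120 / (2 * 6) = 10.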
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = 'encodec'
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4_0_0_0 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=1_2_8 , num_filters=3_2 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1_0_2_4 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ) -> str:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )
@property
    def chunk_length( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate( self ) -> int:
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers( self ) -> int:
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
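# Worked numbers (sketch, using the 24 kHz defaults above): hop_length = 8*5*4*2 = 320,
# so frame_rate = ceil(24000 / 320) = 75, and with target_bandwidths[-1] = 24.0 the
# num_quantizers property evaluates to 1000 * 24 // (75 * 10) = 32 codebooks.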
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__( self , input_dims = 1_2_8 , targets_length = 2_5_6 , max_decoder_noise_time = 2000.0 , d_model = 7_6_8 , num_layers = 1_2 , num_heads = 1_2 , d_kv = 6_4 , d_ff = 2_0_4_8 , dropout_rate = 0.1 , ) -> int:
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ) -> List[Any]:
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
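        # Shape sketch: for query_input (batch, q_len) and key_input (batch, k_len) the
        # elementwise product above yields (batch, q_len, k_len), unsqueezed to a
        # broadcastable (batch, 1, q_len, k_len) cross-attention mask.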
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ) -> Dict:
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y_mask )) for x, y_mask in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module ):
'''simple docstring'''
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> Dict:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ) -> Union[str, Any]:
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
'''simple docstring'''
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> Optional[Any]:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ) -> Union[str, Any]:
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
'''simple docstring'''
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> List[str]:
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ) -> List[Any]:
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
'''simple docstring'''
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> List[Any]:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ) -> List[str]:
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
'''simple docstring'''
    def __init__( self , d_model , d_ff , dropout_rate ) -> Optional[Any]:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> Any:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
'''simple docstring'''
    def __init__( self , hidden_size , eps=1E-6 ) -> int:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
'''simple docstring'''
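    # Tanh approximation of GELU, 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x**3))),
    # as used by the original GPT-2/T5 implementations.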
    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
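# FiLM in one line (a sketch with made-up numbers): the conditioning embedding is
# projected to 2 * out_features, split into (scale, shift), and applied as
# x * (1 + scale) + shift.
import torch

x = torch.ones(1, 4)
scale_shift = torch.cat([torch.full((1, 4), 0.5), torch.full((1, 4), 2.0)], dim=-1)
scale, shift = torch.chunk(scale_shift, 2, dim=-1)
print(x * (1 + scale) + shift)  # tensor([[3.5000, 3.5000, 3.5000, 3.5000]])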
| 325
| 1
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
UpperCamelCase__ ='sshleifer/mar_enro_6_3_student'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Dict:
super().setUp()
_SCREAMING_SNAKE_CASE : List[Any] = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : int = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def UpperCamelCase_ ( self ) -> Optional[int]:
MarianMTModel.from_pretrained(__lowerCamelCase )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : List[Any] = {
"$MAX_LEN": 6_4,
"$BS": 6_4,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
_SCREAMING_SNAKE_CASE : int = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
_SCREAMING_SNAKE_CASE : List[str] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE : List[str] = bash_script.replace(__lowerCamelCase , str(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE : str = F"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE : int = ["finetune.py"] + bash_script.split() + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE : int = pl.Trainer.add_argparse_args(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = SummarizationModule.add_model_specific_args(__lowerCamelCase , os.getcwd() )
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
_SCREAMING_SNAKE_CASE : Union[str, Any] = main(__lowerCamelCase )
# Check metrics
_SCREAMING_SNAKE_CASE : List[Any] = load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE : Optional[int] = metrics["val"][0]
_SCREAMING_SNAKE_CASE : Any = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __lowerCamelCase )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
        # guard against the model hanging on generate (may indicate a bad saved config)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 1_7 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.listdir(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = [x for x in contents if x.endswith(".ckpt" )][0]
_SCREAMING_SNAKE_CASE : int = os.path.join(args.output_dir , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = torch.load(__lowerCamelCase , map_location="cpu" )
_SCREAMING_SNAKE_CASE : Any = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE : Optional[int] = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@timeout_decorator.timeout(6_0_0 )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : str = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
_SCREAMING_SNAKE_CASE : List[str] = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_2_8,
"$BS": 1_6,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
_SCREAMING_SNAKE_CASE : List[str] = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
_SCREAMING_SNAKE_CASE : Tuple = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE : Optional[Any] = bash_script.replace(__lowerCamelCase , str(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE : str = bash_script.replace("--fp16" , "" )
_SCREAMING_SNAKE_CASE : str = 6
_SCREAMING_SNAKE_CASE : str = (
["distillation.py"]
+ bash_script.split()
+ [
F"""--output_dir={output_dir}""",
"--gpus=1",
"--learning_rate=1e-3",
F"""--num_train_epochs={epochs}""",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE : Tuple = pl.Trainer.add_argparse_args(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = SummarizationDistiller.add_model_specific_args(__lowerCamelCase , os.getcwd() )
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE : Optional[int] = distill_main(__lowerCamelCase )
# Check metrics
_SCREAMING_SNAKE_CASE : Optional[int] = load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE : List[str] = metrics["val"][0]
_SCREAMING_SNAKE_CASE : Optional[int] = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __lowerCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE : Tuple = os.listdir(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = [x for x in contents if x.endswith(".ckpt" )][0]
_SCREAMING_SNAKE_CASE : Dict = os.path.join(args.output_dir , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(__lowerCamelCase , map_location="cpu" )
_SCREAMING_SNAKE_CASE : List[str] = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE : List[str] = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 325
|
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = [0 for i in range(r + 1 )]
# nc0 = 1
_SCREAMING_SNAKE_CASE : Optional[int] = 1
for i in range(1, n + 1 ):
# to compute current row from previous row.
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(__lowerCamelCase, __lowerCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
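# Cross-check of the rolling-row Pascal DP above against math.comb (a sketch;
# `n_c_r` is a hypothetical standalone copy of the function for testing).
import math

def n_c_r(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        j = min(i, r)
        while j > 0:  # build row i from row i - 1, in place, right to left
            c[j] += c[j - 1]
            j -= 1
    return c[r]

for n, r in [(10, 5), (20, 3), (7, 0)]:
    assert n_c_r(n, r) == math.comb(n, r)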
| 325
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'vit_msn'
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-06 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
_SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Any = initializer_range
_SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[int] = image_size
_SCREAMING_SNAKE_CASE : Any = patch_size
_SCREAMING_SNAKE_CASE : Optional[int] = num_channels
_SCREAMING_SNAKE_CASE : Dict = qkv_bias
| 325
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
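# A single-process sketch of the gather -> retrieve -> scatter round trip above.
# With world_size=1 the main worker gathers its own tensor, runs a stand-in
# "retrieval", and scatters the single chunk back to itself. The address and
# port below are assumptions for the example.
import torch
import torch.distributed as dist

dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1)
group = dist.new_group(ranks=[0], backend="gloo")

question_hidden_states = torch.randn(2, 4)  # (n_queries, hidden)
gather_list = [torch.empty_like(question_hidden_states)]
dist.gather(question_hidden_states, dst=0, gather_list=gather_list, group=group)

retrieved = torch.cat(gather_list).pow(2)  # stand-in for the actual retrieval
scatter_list = list(torch.chunk(retrieved, 1, dim=0))
target = torch.empty_like(question_hidden_states)
dist.scatter(target, src=0, scatter_list=scatter_list, group=group)
assert torch.allclose(target, question_hidden_states.pow(2))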
| 325
| 1
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ''
__snake_case = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ) -> int:
super().__init__(self , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = repo_info
_SCREAMING_SNAKE_CASE : Any = token
_SCREAMING_SNAKE_CASE : Tuple = None
def UpperCamelCase_ ( self ) -> int:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE : List[Any] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__lowerCamelCase ): {"name": str(__lowerCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = "rb" , **__lowerCamelCase , ) -> Optional[Any]:
if not isinstance(self.repo_info , __lowerCamelCase ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_SCREAMING_SNAKE_CASE : List[Any] = hf_hub_url(self.repo_info.id , __lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCamelCase , mode=__lowerCamelCase , headers=get_authentication_headers_for_url(__lowerCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> List[str]:
self._get_dirs()
_SCREAMING_SNAKE_CASE : List[Any] = self._strip_protocol(__lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=False , **__lowerCamelCase ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE : List[str] = PurePosixPath(path.strip("/" ) )
_SCREAMING_SNAKE_CASE : List[str] = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE : int = PurePosixPath(p.strip("/" ) )
_SCREAMING_SNAKE_CASE : int = p.parent
if root == path:
_SCREAMING_SNAKE_CASE : str = f
_SCREAMING_SNAKE_CASE : int = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
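# Quick illustration of the PurePosixPath.parents trick used in _get_dirs above:
# every ancestor of a sibling file (except the root ".") becomes a directory entry.
from pathlib import PurePosixPath

parents = list(PurePosixPath("data/train/part-0.parquet").parents)[:-1]
print([str(p) for p in parents])  # ['data/train', 'data']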
| 325
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'timesformer'
def __init__( self , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=8 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-6 , __lowerCamelCase=True , __lowerCamelCase="divided_space_time" , __lowerCamelCase=0 , **__lowerCamelCase , ) -> List[str]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : str = num_frames
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
_SCREAMING_SNAKE_CASE : Tuple = attention_type
_SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate
| 325
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Tuple = SamImageProcessor()
_SCREAMING_SNAKE_CASE : Dict = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def UpperCamelCase_ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : str = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : str = SamProcessor(image_processor=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : List[Any] = image_processor(__lowerCamelCase , return_tensors="np" )
_SCREAMING_SNAKE_CASE : List[Any] = processor(images=__lowerCamelCase , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : int = SamProcessor(image_processor=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = [torch.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : List[str] = [[1_7_6_4, 2_6_4_6]]
_SCREAMING_SNAKE_CASE : int = [[6_8_3, 1_0_2_4]]
_SCREAMING_SNAKE_CASE : Optional[int] = processor.post_process_masks(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : int = processor.post_process_masks(
__lowerCamelCase , torch.tensor(__lowerCamelCase ) , torch.tensor(__lowerCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
_SCREAMING_SNAKE_CASE : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(__lowerCamelCase , np.array(__lowerCamelCase ) , np.array(__lowerCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = processor.post_process_masks(__lowerCamelCase , np.array(__lowerCamelCase ) , np.array(__lowerCamelCase ) )
@require_vision
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Dict = SamImageProcessor()
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def UpperCamelCase_ ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : str = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Tuple = SamProcessor(image_processor=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Tuple = image_processor(__lowerCamelCase , return_tensors="np" )
_SCREAMING_SNAKE_CASE : List[Any] = processor(images=__lowerCamelCase , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Any = SamProcessor(image_processor=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = [tf.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Dict = [[1_7_6_4, 2_6_4_6]]
_SCREAMING_SNAKE_CASE : str = [[6_8_3, 1_0_2_4]]
_SCREAMING_SNAKE_CASE : Optional[int] = processor.post_process_masks(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(
__lowerCamelCase , tf.convert_to_tensor(__lowerCamelCase ) , tf.convert_to_tensor(__lowerCamelCase ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
_SCREAMING_SNAKE_CASE : Optional[Any] = [np.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Dict = processor.post_process_masks(
__lowerCamelCase , np.array(__lowerCamelCase ) , np.array(__lowerCamelCase ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
_SCREAMING_SNAKE_CASE : int = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(
__lowerCamelCase , np.array(__lowerCamelCase ) , np.array(__lowerCamelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : List[Any] = SamImageProcessor()
_SCREAMING_SNAKE_CASE : int = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def UpperCamelCase_ ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Any = SamProcessor(image_processor=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_SCREAMING_SNAKE_CASE : Optional[Any] = [tf.convert_to_tensor(__lowerCamelCase )]
_SCREAMING_SNAKE_CASE : str = [torch.tensor(__lowerCamelCase )]
_SCREAMING_SNAKE_CASE : int = [[1_7_6_4, 2_6_4_6]]
_SCREAMING_SNAKE_CASE : List[Any] = [[6_8_3, 1_0_2_4]]
_SCREAMING_SNAKE_CASE : Optional[Any] = processor.post_process_masks(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Optional[Any] = SamProcessor(image_processor=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : str = image_processor(__lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
_SCREAMING_SNAKE_CASE : List[Any] = processor(images=__lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
_SCREAMING_SNAKE_CASE : Tuple = image_processor(__lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
_SCREAMING_SNAKE_CASE : List[Any] = processor(images=__lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
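# The shape contract exercised above, in miniature (a sketch, not the processor's
# exact implementation): low-res masks are interpolated back to each image's
# original size.
import torch
import torch.nn.functional as F

low_res = torch.ones(1, 3, 5, 5)
upscaled = F.interpolate(low_res, size=(1764, 2646), mode="bilinear", align_corners=False)
print(upscaled.shape)  # torch.Size([1, 3, 1764, 2646])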
| 325
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'spiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCamelCase__ ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCamelCase__ ='▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
if self.remove_space:
_SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs
_SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[str] = ""
_SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
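# A small, self-contained check of the special-token layout built above, using
# made-up token ids (cls=2, sep=3) instead of a real vocabulary.
cls, sep = [2], [3]
token_ids_a, token_ids_b = [10, 11], [20]
assert cls + token_ids_a + sep == [2, 10, 11, 3]
assert cls + token_ids_a + sep + token_ids_b + sep == [2, 10, 11, 3, 20, 3]
# token type ids: 0s for the first segment (incl. cls/sep), 1s for the second
assert len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] == [0, 0, 0, 0, 1, 1]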
| 325
| 1
|
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise ValueError("check_bouncy() accepts only integer arguments" )
_SCREAMING_SNAKE_CASE : List[Any] = str(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = "".join(sorted(__lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def lowerCamelCase__ (__lowerCamelCase = 99 ):
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Optional[int] = 1
while True:
if check_bouncy(__lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 325
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 325
| 1
|
from __future__ import annotations
import requests
UpperCamelCase__ =set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = 1, __lowerCamelCase = "new", __lowerCamelCase = None ):
_SCREAMING_SNAKE_CASE : Optional[int] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowerCamelCase ) - valid_terms ) ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""", headers={"User-agent": "A random string"}, )
if response.status_code == 429:
raise requests.HTTPError
_SCREAMING_SNAKE_CASE : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowerCamelCase )}
_SCREAMING_SNAKE_CASE : int = {}
for id_ in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 325
|
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
        # Validate that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 325
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'mobilenet_v1'
def __init__( self , __lowerCamelCase=3 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1.0 , __lowerCamelCase=8 , __lowerCamelCase="relu6" , __lowerCamelCase=True , __lowerCamelCase=0.999 , __lowerCamelCase=0.02 , __lowerCamelCase=0.001 , **__lowerCamelCase , ) -> Tuple:
super().__init__(**__lowerCamelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_SCREAMING_SNAKE_CASE : List[Any] = num_channels
_SCREAMING_SNAKE_CASE : Tuple = image_size
_SCREAMING_SNAKE_CASE : Any = depth_multiplier
_SCREAMING_SNAKE_CASE : int = min_depth
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
_SCREAMING_SNAKE_CASE : str = tf_padding
_SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = initializer_range
_SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def UpperCamelCase_ ( self ) -> float:
return 1E-4
| 325
|
from __future__ import annotations
import math
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__lowerCamelCase ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
return min(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 34423]
_SCREAMING_SNAKE_CASE : Tuple = math.log(len(__lowerCamelCase ), 2 )
print("Optimal value : ", end="" )
print(minimax(0, 0, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
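# Hand-checked trace of main() above for scores = [90, 23, 6, 33, 21, 65, 123, 34423]
# (height = log2(8) = 3, root is a MAX node):
#   depth-2 maxima of leaf pairs: [90, 33, 65, 34423]
#   depth-1 minima:               [33, 65]
#   root maximum (optimal value): 65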
| 325
| 1
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=4 , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = parent
_SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
_SCREAMING_SNAKE_CASE : Any = seq_length
_SCREAMING_SNAKE_CASE : Optional[Any] = is_training
_SCREAMING_SNAKE_CASE : Any = use_attention_mask
_SCREAMING_SNAKE_CASE : Any = use_token_type_ids
_SCREAMING_SNAKE_CASE : str = use_labels
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
_SCREAMING_SNAKE_CASE : List[str] = hidden_size
_SCREAMING_SNAKE_CASE : str = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
_SCREAMING_SNAKE_CASE : Any = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
_SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Dict = initializer_range
_SCREAMING_SNAKE_CASE : str = num_choices
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : int = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
_SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = True
__snake_case = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Any = FlaxRoFormerModelTester(self )
@slow
def UpperCamelCase_ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Dict = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : str = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : List[str] = 5_0_0_0_0
_SCREAMING_SNAKE_CASE : Any = (1, 6, vocab_size)
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
| 325
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
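# What the copy-comment grammar above accepts (a sketch): the second group captures
# the dotted object path, the third any `with old->new` replacement pattern.
_example = "# Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
_m = _re_copy_warning.search(_example)
assert _m is not None and _m.groups()[1] == "models.attention.Attention"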
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
_SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119, preview=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
        # Loop to check the observed code; stop when the indentation diminishes or we see an `# End copy` comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
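# Hedged usage sketch (the path is hypothetical): is_copy_consistent returns a list of
# [object_name, line_number] pairs, one per "# Copied from" block that no longer matches
# its source, e.g.
#   diffs = is_copy_consistent("src/diffusers/models/attention.py")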
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
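        # ConvBERT's head_ratio (2 by default) halves the number of attention heads actually
        # used by self-attention, which is why the expected attention shapes in these checks
        # use num_attention_heads / 2 rather than num_attention_heads.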
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    '''simple docstring'''
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
'''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(reversed_block_out_channels) - 1)]
            is_final_block = i == len(reversed_block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample)
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
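# Complexity note: the modulo/shift version inspects every bit of the input, so it runs in
# O(b) for a b-bit number, while Kernighan's `number &= number - 1` clears one set bit per
# iteration, so it runs in O(popcount). Illustrative values: 25 = 0b11001 -> 3,
# 37 = 0b100101 -> 3, 58 = 0b111010 -> 4, 0 -> 0.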
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
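    # Note: the QFormer tokenizer is not one of ProcessorMixin's registered attributes, so it
    # is round-tripped manually -- written to a "qformer_tokenizer" subfolder in
    # save_pretrained above and re-loaded from that subfolder in from_pretrained.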
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
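# The assignment above swaps this module for a _LazyModule proxy in sys.modules, so the
# torch-backed modeling classes are only imported when one of them is first accessed; the
# TYPE_CHECKING branch keeps static type checkers aware of the real symbols.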
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
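# Example (a quick sanity check): calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19].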
def solution(max_base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(max_base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
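# Why logarithms: p**q * q**p <= max_base**degree is equivalent to
# q*log2(p) + p*log2(q) <= degree*log2(max_base), which avoids computing astronomically
# large powers. The two-pointer sweep is valid because the left-hand side grows with both
# primes, so once a `right` prime exceeds the bound for a given `left`, it also exceeds it
# for every larger `left`.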
if __name__ == "__main__":
print(f"{solution() = }")
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
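# Worked example from the Project Euler 22 statement: "COLIN" has letter score
# 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name once the list is sorted, so it
# contributes 938 * 53 = 49714 to the total.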
if __name__ == "__main__":
print(solution())
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
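    # Geometric note: theta is the angle between the two flattened tensors, and the return
    # value is spherical linear interpolation,
    # sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
    # which keeps the interpolated noise on (approximately) the same hypersphere as the inputs.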
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = MarianTokenizer
__snake_case = False
__snake_case = True
def UpperCamelCase_ ( self ) -> Tuple:
super().setUp()
_SCREAMING_SNAKE_CASE : Dict = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_SCREAMING_SNAKE_CASE : List[str] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
_SCREAMING_SNAKE_CASE : Dict = Path(self.tmpdirname )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["target_spm"] )
_SCREAMING_SNAKE_CASE : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Union[str, Any]:
return (
"This is a test",
"This is a test",
)
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = "</s>"
_SCREAMING_SNAKE_CASE : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__lowerCamelCase ) , 9 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
_SCREAMING_SNAKE_CASE : Tuple = en_de_tokenizer(["I am a small frog"] , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(__lowerCamelCase , batch.input_ids[0] )
_SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = [x.name for x in Path(__lowerCamelCase ).glob("*" )]
self.assertIn("source.spm" , __lowerCamelCase )
MarianTokenizer.from_pretrained(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : List[str] = tok(
["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Tuple = tok(["I am a tiny frog", "I am a small frog"] , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
# fmt: off
_SCREAMING_SNAKE_CASE : str = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
    def test_tokenizer_integration_separate_vocabs(self) -> None:
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
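    # Note (added for clarity): this checkpoint carries separate source and
    # target vocabularies, so plain tokenizer(...) encodes with the source
    # (Finnish) vocab, while tokenizer(text_target=...) switches to the target
    # (English) vocab. That is why similar-length sentences get different ids.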
| 325
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count the integer-sided right triangles for each perimeter up to max_perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter that admits the most Pythagorean triplets."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
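# Quick sanity check (added illustration; both values are easy to verify):
# the 3-4-5 triangle is the only triplet with perimeter <= 12, and for the
# Project Euler #39 bound of 1000 the most common perimeter is 840.
if __name__ == "__main__":
    assert pythagorean_triple(12)[12] == 1  # exactly one triplet: 3-4-5
    assert solution(1000) == 840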
| 325
| 1
|
import math


def perfect_square(num: int) -> bool:
    """Float-based check; can misreport very large inputs due to rounding."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether n is a perfect square using an integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
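# Example usage (illustrative):
#     perfect_square_binary_search(16)  -> True
#     perfect_square_binary_search(17)  -> False
# The integer search never leaves exact arithmetic, so it avoids the float
# rounding that can bite the sqrt-based check for huge numbers.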
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
|
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== verification =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
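# Note on the mock wiring (added for clarity): stacked @patch decorators are
# applied bottom-up, so the innermost patch ("builtins.open") becomes the
# first test argument (file) and "socket.socket" becomes the second (sock).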
| 325
| 1
|
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
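# A slice-free variant (added sketch, not in the original): passing low/high
# indices avoids the O(n) list copies that a_list[:midpoint] makes per call.
def binary_search_by_index(a_list: list[int], item: int, low: int = 0, high: int | None = None) -> bool:
    if high is None:
        high = len(a_list) - 1
    if low > high:
        return False
    midpoint = (low + high) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search_by_index(a_list, item, low, midpoint - 1)
    return binary_search_by_index(a_list, item, midpoint + 1, high)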
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
    print(f"{target} was {not_str}found in {sequence}")
| 325
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""Constructs an InstructBLIP processor from a BLIP image processor, a tokenizer and a Q-Former tokenizer."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer) -> None:
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
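# Minimal usage sketch (added; the checkpoint name is only an example, and the
# heavy download is why this stays commented out):
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is shown here?", return_tensors="pt")
#     # inputs now carries input_ids / qformer_input_ids / pixel_values plus masks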
| 325
| 1
|
import os

# Precomputes a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordlist_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordlist_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
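# Worked example (added): "SKY" scores 19 + 11 + 25 = 55, and 55 is the
# 10th triangular number (0.5 * 10 * 11), so it counts as a triangle word.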
| 325
|
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
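# Example usage (illustrative):
#     twin_prime(3)  -> 5   (3 and 5 are both prime)
#     twin_prime(4)  -> -1  (4 is not prime)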
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 1
|
def longest_distance(graph):
    """Print the number of vertices on a longest path in a DAG, via Kahn's topological order."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
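# For the sample DAG below the longest chain is 0 -> 2 -> 5 -> 6 -> 7,
# so the call prints 5 (a vertex count, since distances start at 1).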
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 325
|
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if it is already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
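# Example invocation (illustrative, assuming the transformers CLI entry point):
#     transformers-cli download --cache-dir /tmp/models bert-base-uncased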
| 325
| 1
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}.")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}")


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: the requirement targets the interpreter itself
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-install hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
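# Example usage (illustrative):
#     require_version("packaging>=20.0")       # any modern packaging release passes
#     require_version("python>=3.8,<4.0")      # checks the running interpreter
#     require_version_core("tokenizers>=0.11") # failure suggests reinstalling transformers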
| 325
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
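# Note (added): the helper fills every mask the model's call signature accepts,
# so a test can override any single mask while the rest default to "keep
# everything" (ones), with padding positions zeroed out of the attention masks.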
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of a bug when downloading the new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 325
| 1
|
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's iterated scheme."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
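# Known example (added; easy to verify by hand): the points (1,6), (2,7),
# (3,8), (4,9), (6,11) all lie on y = x + 5, so
#     neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0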
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
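# Why the two-pointer scan works (added explanation): taking log2 of
# p**q * q**p <= base**degree gives q*log2(p) + p*log2(q) <= degree*log2(base),
# so for each smallest prime p every larger prime up to the moving right
# bound pairs with it, contributing right - left hybrid integers per step.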
if __name__ == "__main__":
print(f"{solution() = }")
| 325
| 1
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}


class GitVisionConfig(PretrainedConfig):
    model_type = 'git_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = 'git'

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
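# Minimal usage sketch (added illustration):
#     config = GitConfig()            # nested GitVisionConfig built with defaults
#     d = config.to_dict()
#     assert d["model_type"] == "git" and "vision_config" in d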
| 325
|
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
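# Worked check (added; standard binomial values):
#     combinations(52, 5) == 2598960                  # five-card hands
#     combinations(n, k) == combinations(n, n - k)    # symmetry of "choose"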
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
| 325
| 1