| code (string, 87 – 55.2k chars) | code_codestyle (int64, 0 – 349) | style_context (string, 135 – 49.1k chars) | style_context_codestyle (int64, 0 – 349) | label (int64, 0 – 1) |
|---|---|---|---|---|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
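# --- Added illustration (not part of the original test file): a minimal,
# self-contained sketch of the call contract exercised above. A stopping
# criterion is a callable over (input_ids, scores) that signals whether
# generation should stop; the shapes below are arbitrary.
def _demo_max_length_criterion():
    criterion = MaxLengthCriteria(max_length=4)
    input_ids = torch.zeros((1, 3), dtype=torch.long)  # 3 tokens generated so far
    scores = torch.ones((1, 3))
    assert not criterion(input_ids, scores)  # below max_length: keep generating
    input_ids = torch.zeros((1, 4), dtype=torch.long)  # reached max_length
    scores = torch.ones((1, 4))
    assert criterion(input_ids, scores)  # signal generation to stop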
| 21 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
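# --- Added illustration (hypothetical, heavily simplified): the retry idea
# behind `find_executable_batch_size`. This is NOT accelerate's actual
# implementation; it only sketches the halve-on-OOM loop the decorator applies
# to `inner_training_loop` above.
def _sketch_find_executable_batch_size(starting_batch_size=128):
    def decorator(function):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" in str(e).lower():
                        batch_size //= 2  # halve the batch size and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator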
| 289 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
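# --- Added walkthrough (illustrative): how the toy merges above produce the
# expected tokenization, following standard BPE with </w> end-of-word markers:
#   "adapt" -> a d a p t</w> --"a p"--> a d ap t</w> --"ap t</w>"--> a d apt</w>
#           --"a d"--> ad apt</w> --"ad apt</w>"--> adapt</w>  => token "adapt" (id 0)
#   "react" -> r e a c t</w> --"r e"--> re a c t</w>; no further merges apply,
#           so it surfaces as the continuation pieces "re@@ a@@ c@@ t",
#           matching the `test_full_tokenizer` expectation.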
| 22 |
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 289 | 0 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
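# --- Added usage examples (illustrative):
# >>> quadratic_roots(a=1, b=-2, c=1)   # zero discriminant: repeated real root
# (1.0, 1.0)
# >>> quadratic_roots(a=1, b=0, c=1)    # negative discriminant: complex pair
# (1j, -1j)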
| 23 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = UNetaDModel
_snake_case : List[str] = 'sample'
@property
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = UNetaDModel
_snake_case : Optional[Any] = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self : Dict ):
return (4, 32, 32)
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : str ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) )
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[Any] = UNetaDModel
_snake_case : str = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Any ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : List[str] ):
# not required for this model
pass
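# --- Added illustration (not part of the original tests): the forward
# contract these tests exercise. Note the identifiers above were corrupted
# ("UNetaDModel"); the real diffusers class is `UNet2DModel`. A minimal,
# hypothetical usage mirroring the dummy config:
def _demo_unet2d_forward():
    from diffusers import UNet2DModel

    unet = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    noise = torch.randn(1, 3, 32, 32)
    pred = unet(noise, timestep=10).sample  # same spatial shape as `noise`
    return pred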
| 289 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
def __init__(self : List[str] , a__ : str = "cpu" , a__ : str = "openai/clip-vit-large-patch14" ):
"""simple docstring"""
__snake_case = device
__snake_case = CLIPTokenizerFast.from_pretrained(a__ )
__snake_case = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__snake_case = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__snake_case = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__snake_case = torchvision.transforms.Resize(224 )
__snake_case = torchvision.transforms.CenterCrop(224 )
def a (self : Optional[Any] , a__ : str ):
"""simple docstring"""
__snake_case = self.resize(a__ )
__snake_case = self.center_crop(a__ )
__snake_case = self.normalize(a__ )
return images
def __call__(self : Union[str, Any] , a__ : Tuple=None , a__ : Tuple=None , **a__ : int ):
"""simple docstring"""
__snake_case = self.tokenizer(text=a__ , **a__ )
__snake_case = self.preprocess_img(a__ )
__snake_case = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Optional[int] , a__ : Dict=10 , a__ : Dict=0.0_1 , a__ : Union[str, Any]=None , a__ : Any=None , a__ : Optional[Any]=None , a__ : Dict=None , a__ : Union[str, Any]=None , a__ : List[str]=None , a__ : Union[str, Any]=False , a__ : Dict=True , a__ : Dict="image" , a__ : Optional[Any]=True , a__ : List[str]=False , a__ : Optional[Any]=False , a__ : List[str]=False , ):
"""simple docstring"""
super().__init__()
__snake_case = None
__snake_case = device if device else get_device()
if vqgan:
__snake_case = vqgan
else:
__snake_case = load_vqgan(self.device , conf_path=a__ , ckpt_path=a__ )
self.vqgan.eval()
if clip:
__snake_case = clip
else:
__snake_case = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__snake_case = ProcessorGradientFlow(device=self.device )
__snake_case = iterations
__snake_case = lr
__snake_case = log
__snake_case = make_grid
__snake_case = return_val
__snake_case = quantize
__snake_case = self.vqgan.decoder.z_shape
def a (self : str , a__ : Optional[int]=None , a__ : Tuple=None , a__ : str=5 , a__ : int=True ):
"""simple docstring"""
__snake_case = []
if output_path is None:
__snake_case = '''./animation.gif'''
if input_path is None:
__snake_case = self.save_path
__snake_case = sorted(glob(input_path + '''/*''' ) )
if not len(a__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(a__ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__snake_case = total_duration / len(a__ )
__snake_case = [frame_duration] * len(a__ )
if extend_frames:
__snake_case = 1.5
__snake_case = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(a__ ) )
imageio.mimsave(a__ , a__ , duration=a__ )
print(f"""gif saved to {output_path}""" )
def a (self : List[Any] , a__ : List[str]=None , a__ : Optional[int]=None ):
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__snake_case = preprocess(Image.open(a__ ) , target_image_size=256 ).to(self.device )
__snake_case = preprocess_vqgan(a__ )
__snake_case , *__snake_case = self.vqgan.encode(a__ )
return z
def a (self : List[str] , a__ : int ):
"""simple docstring"""
__snake_case = self.latent.detach().requires_grad_()
__snake_case = base_latent + transform_vector
if self.quantize:
__snake_case , *__snake_case = self.vqgan.quantize(a__ )
else:
__snake_case = trans_latent
return self.vqgan.decode(a__ )
def a (self : int , a__ : Dict , a__ : str , a__ : str=None ):
"""simple docstring"""
__snake_case = self.clip_preprocessor(text=a__ , images=a__ , return_tensors='''pt''' , padding=a__ )
__snake_case = self.clip(**a__ )
__snake_case = clip_outputs.logits_per_image
if weights is not None:
__snake_case = similarity_logits * weights
return similarity_logits.sum()
def a (self : Dict , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : int ):
"""simple docstring"""
__snake_case = self._get_clip_similarity(pos_prompts['''prompts'''] , a__ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__snake_case = self._get_clip_similarity(neg_prompts['''prompts'''] , a__ , weights=neg_prompts['''weights'''] )
else:
__snake_case = torch.tensor([1] , device=self.device )
__snake_case = -torch.log(a__ ) + torch.log(a__ )
return loss
def a (self : Any , a__ : Optional[Any] , a__ : List[str] , a__ : Tuple ):
"""simple docstring"""
__snake_case = torch.randn_like(self.latent , requires_grad=a__ , device=self.device )
__snake_case = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__snake_case = self._add_vector(a__ )
__snake_case = loop_post_process(a__ )
__snake_case = self._get_CLIP_loss(a__ , a__ , a__ )
print('''CLIP loss''' , a__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=a__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def a (self : int , a__ : Optional[Any] , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
wandb.init(reinit=a__ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__snake_case = Image.open(a__ )
__snake_case = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(a__ ) )
def a (self : Optional[int] , a__ : int ):
"""simple docstring"""
if not prompts:
return []
__snake_case = []
__snake_case = []
if isinstance(a__ , a__ ):
__snake_case = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(a__ , (tuple, list) ):
__snake_case = prompt[0]
__snake_case = float(prompt[1] )
elif ":" in prompt:
__snake_case , __snake_case = prompt.split(''':''' )
__snake_case = float(a__ )
else:
__snake_case = prompt
__snake_case = 1.0
processed_prompts.append(a__ )
weights.append(a__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a__ , device=self.device ),
}
def a (self : Optional[Any] , a__ : Union[str, Any] , a__ : Any=None , a__ : List[Any]=None , a__ : Optional[int]=True , a__ : Any=False , a__ : Any=True , a__ : int=True , a__ : List[str]=None , ):
"""simple docstring"""
if image_path:
__snake_case = self._get_latent(a__ )
else:
__snake_case = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(a__ , a__ , a__ )
assert pos_prompts, "You must provide at least one positive prompt."
__snake_case = self.process_prompts(a__ )
__snake_case = self.process_prompts(a__ )
if save_final and save_path is None:
__snake_case = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
__snake_case = save_path + '''_''' + get_timestamp()
os.makedirs(a__ )
__snake_case = save_path
__snake_case = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(a__ ) )
__snake_case = loop_post_process(a__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a__ , a__ , a__ ) ):
if show_intermediate:
show_pil(a__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(a__ )} )
if show_final:
show_pil(a__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
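# --- Added illustration (hypothetical helper, not in the original file): the
# prompt format `process_prompts` above accepts. Prompts may arrive as one
# "|"-separated string, and each prompt may carry a ":weight" suffix:
def _demo_parse_prompts(prompts="a sunny beach:1.0|a stormy sea:0.5"):
    parsed = []
    for prompt in (p.strip() for p in prompts.split("|")):
        if ":" in prompt:
            text, weight = prompt.split(":")
            parsed.append((text, float(weight)))
        else:
            parsed.append((prompt, 1.0))
    return parsed  # [("a sunny beach", 1.0), ("a stormy sea", 0.5)]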
| 24 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = StableUnCLIPPipeline
_snake_case : str = TEXT_TO_IMAGE_PARAMS
_snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = 32
_UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 289 | 0 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[str] = CodeGenTokenizer
__UpperCamelCase : int = CodeGenTokenizerFast
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = {'''add_prefix_space''': True}
__UpperCamelCase : str = False
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
SCREAMING_SNAKE_CASE__ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE__ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
SCREAMING_SNAKE_CASE__ : List[str] = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = """lower newer"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer"""
return input_text, output_text
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : int = """lower newer"""
SCREAMING_SNAKE_CASE__ : Dict = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = """lower newer"""
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : str = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
pass
def __magic_name__ (self , SCREAMING_SNAKE_CASE__=15 ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# Simple input
SCREAMING_SNAKE_CASE__ : Any = """This is a simple input"""
SCREAMING_SNAKE_CASE__ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ : Dict = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
SCREAMING_SNAKE_CASE__ : str = """This is a simple input"""
SCREAMING_SNAKE_CASE__ : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
SCREAMING_SNAKE_CASE__ : str = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(*SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = """$$$"""
SCREAMING_SNAKE_CASE__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = """This is a simple input"""
SCREAMING_SNAKE_CASE__ : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
SCREAMING_SNAKE_CASE__ : str = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
SCREAMING_SNAKE_CASE__ : int = """\nif len_a > len_b: result = a\nelse: result = b"""
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(SCREAMING_SNAKE_CASE__ , truncate_before_pattern=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
pass
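# --- Added illustration (simplified, not CodeGen's exact implementation): the
# idea behind `truncate_before_pattern` used in the slow test above: decode,
# then cut the text at the earliest match of any truncation pattern.
def _demo_truncate_before_patterns(text, patterns):
    cut_points = [m.start() for p in patterns for m in re.finditer(p, text, re.MULTILINE)]
    return text[: min(cut_points)] if cut_points else text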
| 25 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
    get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mps_available,
    is_npu_available,
    is_rich_available,
    is_safetensors_available,
    is_sagemaker_available,
    is_tensorboard_available,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
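# A minimal sketch of the diffusers sampling loop that the four full-loop tests
# above exercise (scale_model_input -> model -> step). The zero-returning
# "denoiser" below is a purely illustrative stand-in for a real model.
def _dpm_sde_sampling_sketch():
    sde_scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    sde_scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * sde_scheduler.init_noise_sigma
    for t in sde_scheduler.timesteps:
        model_input = sde_scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = sde_scheduler.step(noise_pred, t, sample).prev_sample
    return sample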
| 26 | """simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
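# A hedged sketch, not part of the original module: OpenWeatherMap returns a
# JSON body even on failure, with an error code in its "cod" field, so a caller
# may want to validate the payload before using it.
def safe_current_weather(q: str) -> dict:
    response = current_weather(q)
    if str(response.get("cod")) != "200":  # a non-200 "cod" signals an API error
        raise ValueError(f"OpenWeatherMap error: {response.get('message', 'unknown')}")
    return response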
| 289 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
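# Hedged usage sketch (illustrative, not part of the conversion script): reload
# the converted checkpoint for inference, assuming the default dump folder above.
#   model = SwiftFormerForImageClassification.from_pretrained("./converted_outputs/")
#   model.eval()
#   logits = model(processor(images=prepare_img(), return_tensors="pt").pixel_values).logits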
| 27 | """simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    Knuth-Morris-Pratt search: scan `text` once, using a precomputed failure
    array over `pattern` so matched characters are never re-examined.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    For each prefix of `pattern`, store the length of the longest proper prefix
    that is also a suffix of that prefix.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
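    # A small extension sketch (not in the original module): the same failure
    # array can report every match position, including overlapping matches.
    def kmp_find_all(pattern: str, text: str) -> list[int]:
        failure = get_failure_array(pattern)
        matches, j = [], 0
        for i, char in enumerate(text):
            while j > 0 and pattern[j] != char:
                j = failure[j - 1]  # fall back along the failure links
            if pattern[j] == char:
                j += 1
            if j == len(pattern):
                matches.append(i - len(pattern) + 1)  # start index of this match
                j = failure[j - 1]  # keep scanning for overlapping matches
        return matches

    assert kmp_find_all("aba", "ababa") == [0, 2]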
| 289 | 0 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz, counting `number` upwards while it is <= `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
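# Worked example (follows directly from the loop above): each round appends its
# token plus a trailing space.
#   fizz_buzz(1, 7) -> "1 2 Fizz 4 Buzz Fizz 7 "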
| 28 | """simple docstring"""
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`'warn'`, `0`, or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 289 | 0 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
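# Sketch of how a pin table like this is typically consumed: turning the pinned
# specifiers into an install list for a chosen subset of packages. The helper
# below mirrors the deps_list pattern in transformers' setup tooling and is
# illustrative here.
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list("torch", "numpy") -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]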
| 29 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
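    # The two cache tests above check that incremental decoding with init_cache
    # matches a single full decode. A hedged sketch of that decoding pattern
    # outside the test harness (all names illustrative):
    #   encoder_outputs = model.encode(input_ids)
    #   past = model.init_cache(batch_size, max_length, encoder_outputs)
    #   for step in range(max_length):
    #       out = model.decode(next_token, encoder_outputs, past_key_values=past,
    #                          decoder_position_ids=jnp.array([[step]], dtype="i4"))
    #       past = out.past_key_values
    #       next_token = out.logits[:, -1:].argmax(-1)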
| 289 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
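# Hedged usage sketch (requires pytesseract and a local document image; the
# path is a placeholder, not part of the original module):
#   processor = LayoutLMv2ImageProcessor()
#   encoding = processor(PIL.Image.open("document.png").convert("RGB"), return_tensors="np")
#   # encoding["pixel_values"] is resized and BGR; encoding["words"] / encoding["boxes"] hold the OCR output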
| 30 | """simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # classic sieve over the first segment [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # sieve the remaining range in segments of length ~sqrt(n)
    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # first multiple of `each` at or below low
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
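# Cross-check sketch (illustrative, not part of the original module): the
# segmented sieve should agree with a naive primality test on small bounds.
def _naive_primes(n: int) -> list[int]:
    return [p for p in range(2, n + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]


assert sieve(100) == _naive_primes(100)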
| 289 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
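# Hedged usage sketch for a converted checkpoint (folder path illustrative):
#   processor = TrOCRProcessor.from_pretrained(args.pytorch_dump_folder_path)
#   model = VisionEncoderDecoderModel.from_pretrained(args.pytorch_dump_folder_path)
#   pixel_values = processor(images=prepare_img("handwritten"), return_tensors="pt").pixel_values
#   generated_ids = model.generate(pixel_values)
#   text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]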
| 31 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
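# Hedged usage sketch: query a converted SQA checkpoint on a small table
# (dump path illustrative; requires pandas):
#   import pandas as pd
#   tokenizer = TapasTokenizer.from_pretrained(args.pytorch_dump_path)
#   model = TapasForQuestionAnswering.from_pretrained(args.pytorch_dump_path)
#   table = pd.DataFrame({"City": ["Paris", "Berlin"], "Population": ["2.1M", "3.6M"]})
#   inputs = tokenizer(table=table, queries=["Which city has more people?"], return_tensors="pt")
#   outputs = model(**inputs)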
| 289 | 0 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """Undirected weighted graph for Borůvka's minimum-spanning-tree algorithm."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the form [first node, second node, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component a node currently belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-resolves every node's component root after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller component to the larger one (union by size)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Keep the cheapest edge leaving each endpoint's component.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
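    # Hedged usage sketch (weights illustrative): on this triangle the algorithm
    # keeps edges (0-1) and (1-2), for a total MST weight of 3.
    g = Graph(3)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(0, 2, 3)
    g.boruvka()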
| 32 | """simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = random.Random()
_UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa )
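# A quick sketch of how the decorator factory above is meant to be used
# (the names `model` and `input_ids` below are illustrative, not part of this
# module):
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
# With do_eager_mode=False the call is wrapped in tf.function (and XLA-compiled
# when use_xla=True); with do_eager_mode=True it runs eagerly, and use_xla must
# then be False, as enforced by the ValueError above.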
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional repeats to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        memory = Memory(meminfo.used)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 289 | 0 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def lowercase ( __snake_case : np.ndarray , __snake_case : float ):
# For applying gaussian function for each element in matrix.
lowercase_ : Union[str, Any] = math.sqrt(__snake_case )
lowercase_ : Any = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def lowercase ( __snake_case : np.ndarray , __snake_case : int , __snake_case : int , __snake_case : int ):
lowercase_ : List[str] = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def lowercase ( __snake_case : int , __snake_case : float ):
# Creates a gaussian kernel of given dimension.
lowercase_ : Tuple = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __snake_case ):
for j in range(0 , __snake_case ):
lowercase_ : List[str] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__snake_case , __snake_case )
def lowercase ( __snake_case : np.ndarray , __snake_case : float , __snake_case : float , __snake_case : int , ):
lowercase_ : Tuple = np.zeros(img.shape )
lowercase_ : Union[str, Any] = get_gauss_kernel(__snake_case , __snake_case )
lowercase_ , lowercase_ : List[Any] = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowercase_ : str = get_slice(__snake_case , __snake_case , __snake_case , __snake_case )
lowercase_ : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowercase_ : List[Any] = vec_gaussian(__snake_case , __snake_case )
lowercase_ : List[Any] = np.multiply(__snake_case , __snake_case )
lowercase_ : Union[str, Any] = np.multiply(__snake_case , __snake_case )
lowercase_ : Any = np.sum(__snake_case ) / np.sum(__snake_case )
lowercase_ : Optional[Any] = val
return imga
def lowercase ( __snake_case : list ):
lowercase_ : Optional[Any] = args[1] if args[1:] else '''../image_data/lena.jpg'''
lowercase_ : Dict = float(args[2] ) if args[2:] else 1.0
lowercase_ : int = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowercase_ : str = int(args[4] )
lowercase_ : Optional[int] = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowercase_ : Dict = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
__A , __A , __A , __A : List[str] = parse_args(sys.argv)
__A : str = cva.imread(filename, 0)
cva.imshow('''input image''', img)
__A : str = img / 255
__A : Any = out.astype('''float32''')
__A : List[Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
__A : Any = out * 255
__A : Optional[int] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
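# Note on parse_args above: the filter window must have an odd side length so
# that it has a centre pixel; `kernel_size + abs(kernel_size % 2 - 1)` bumps
# even sizes up by one (4 -> 5) and leaves odd sizes unchanged (5 -> 5).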
| 33 | """simple docstring"""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Counts decompositions of needed_sum into powers of distinct natural numbers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Validates the input range and returns the number of decompositions."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
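# Illustration of what backtrack counts: decompositions of `needed_sum` into
# sums of powers of *distinct* natural numbers (the base strictly increases on
# every recursive call). For example, with power=2,
# 100 = 10**2 = 6**2 + 8**2 = 1**2 + 3**2 + 4**2 + 5**2 + 7**2,
# and solve(100, 2) returns the number of all such decompositions.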
| 289 | 0 |
"""Tests for the BARTpho tokenizer."""
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
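# Note on the fixture above: the monolingual vocab file written in setUp is a
# plain-text "token id" pair per line (e.g. "▁This 0"), which is the format
# BartphoTokenizer expects alongside the SentencePiece model; the pieces "l"
# and "à" are absent from it, which is why they map to the <unk> id in the
# expected [4, 5, 6, 3, 3, 7, 8, 3] list.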
| 34 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    # We verify the conversion on an image of two cats (standard COCO test image).
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
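# Why the permutes in replace_params: Keras stores convolution kernels as
# (kernel_h, kernel_w, in_channels, out_channels) and depthwise kernels as
# (kernel_h, kernel_w, channels, depth_multiplier), while PyTorch expects
# (out_channels, in_channels, kernel_h, kernel_w) — hence permute(3, 2, 0, 1)
# and permute(2, 3, 0, 1) respectively; dense kernels only need a transpose.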
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copies the original Keras EfficientNet weights into the HF EfficientNet structure."""
    # Load the original (Keras) model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
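# Example invocation (the script name is illustrative; the flags match the
# argparse definitions above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model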
| 289 | 0 |
"""Rayleigh quotient of a Hermitian matrix and a vector."""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
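# Small numeric sanity check (illustrative): for the Hermitian matrix
# A = [[1, 0], [0, 3]] and v = [[1], [1]], rayleigh_quotient(A, v) gives
# (1 + 3) / 2 = 2.0, which lies between the extreme eigenvalues 1 and 3,
# as the Rayleigh quotient of a Hermitian matrix always does.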
| 35 | """simple docstring"""
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Stores transition probabilities between named nodes of a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Samples the next node proportionally to the outgoing probabilities."""
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Runs `steps` random transitions from `start` and counts node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
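# Illustrative usage (not part of the original module): a two-state chain
# where "a" stays put 90% of the time; after many steps the visit counts
# should roughly reflect the chain's stationary distribution.
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                  ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 5000))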
| 289 | 0 |
def sum_of_digits(n: int) -> int:
    """Iteratively sums the decimal digits of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sums the decimal digits of |n|."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sums the digits via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmarks the three digit-sum implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
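# Worked example for the first benchmark value: 262_144 has digits
# 2 + 6 + 2 + 1 + 4 + 4 = 19, so all three implementations return 19 for it.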
| 36 | """simple docstring"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
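# Shape bookkeeping for the tests above: images are resized so that their
# shortest edge is 20 pixels and then centre-cropped to 18x18, so a single
# image yields pixel_values of shape (1, 3, 18, 18) and a batch of 7 yields
# (7, 3, 18, 18).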
| 289 | 0 |
"""Utilities for loading and preprocessing CNN/DailyMail summarization stories."""
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Lists CNN/DailyMail story files in a directory and splits each into story and summary lines."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Splits a raw story file into article lines and `@highlight` summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncates or right-pads a token sequence so that it is exactly block_size long."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encodes the story and summary lines and flattens them into token id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternates segment ids (0/1) at every separator token, per sequence in the batch."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
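# Illustrative behaviour of the two padding helpers above:
#
#   fit_to_block_size([1, 2, 3], 5, 0)            -> [1, 2, 3, 0, 0]
#   build_mask(torch.tensor([1, 2, 3, 0, 0]), 0)  -> tensor([1, 1, 1, 0, 0])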
| 37 | """simple docstring"""
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
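# Quick arithmetic check of the derived attribute above: with the default
# num_block_repeats [1, 2, 2, 3, 3, 4, 1], sum(...) == 16, so
# EfficientNetConfig().num_hidden_layers == 16 * 4 == 64.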
| 289 | 0 |
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
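    # Note on the integration test below: with apply_ocr=True the processor runs
    # Tesseract and returns the recognised `words` together with their `boxes`,
    # normalised to a 0-1000 coordinate grid (hence every coordinate in the
    # expected lists stays below 1000).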
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase :Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
UpperCamelCase :List[str] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCamelCase )
self.assertListEqual(encoding.boxes , __lowerCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 38 | """simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36,
        num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
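# --- Added usage sketch (not part of the original test file) --------------------
# The same checkpoint exercised end to end with its tokenizer; a hedged sketch that
# assumes network access to download "albert-base-v2".
if __name__ == "__main__":
    from transformers import AlbertTokenizer

    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    model = AlbertModel.from_pretrained("albert-base-v2")
    inputs = tokenizer("ALBERT shares parameters across layers.", return_tensors="pt")
    with torch.no_grad():
        last_hidden_state = model(**inputs).last_hidden_state
    print(last_hidden_state.shape)  # (1, sequence_length, 768)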
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
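# --- Added usage sketch (not part of the original metric module) ----------------
# The docstring above shows the legacy `datasets.load_metric` API; the same metric
# is also exposed through the newer `evaluate` package (a sketch, needs the
# `mauve-text` package plus a GPT-2 featurizer download on first use):
#
#     import evaluate
#
#     mauve = evaluate.load("mauve")
#     out = mauve.compute(predictions=["hello there"], references=["hello there"])
#     print(out.mauve)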
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if there is a path from s to t with remaining capacity, filling parent[]."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def mincut(graph, source, sink):
    """Return the saturated edges of a minimum s-t cut, found via BFS augmenting paths (Edmonds-Karp)."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    # Edges whose capacity is fully used up form the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
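# Added for reference (not in the original module): the same BFS-augmenting loop
# also yields the maximum flow value, which by the max-flow/min-cut theorem equals
# the total capacity of the cut returned by mincut().  A sketch reusing bfs() above:
def max_flow_value(graph, source, sink):
    graph = [row[:] for row in graph]  # work on a copy; mincut() mutates its input
    parent = [-1] * len(graph)
    flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        flow += path_flow
        # Update residual capacities along the path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return flow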
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """Parse common truthy/falsy strings into a bool; pass booleans through unchanged."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice to its string form so argparse can parse the string and recover the typed value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """simple docstring"""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
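# --- Added usage sketch (not part of the original module) -----------------------
# A hypothetical dataclass parsed from the command line; `ExampleArguments` and its
# fields are illustrative only.
if __name__ == "__main__":
    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = dataclasses.field(default=5e-5, metadata={"help": "Initial learning rate."})
        do_train: bool = dataclasses.field(default=False, metadata={"help": "Whether to run training."})

    example_parser = HfArgumentParser(ExampleArguments)
    (example_args,) = example_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_train"])
    print(example_args)  # ExampleArguments(learning_rate=0.0001, do_train=True)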
"""simple docstring"""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: the weight row closest to the sample by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # Return the index of the closer (smaller-distance) weight vector.
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Move winning vector j toward the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
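# Worked update example (added for illustration): with alpha = 0.5, sample
# [1, 1, 0, 0] and winner row [0.2, 0.6, 0.5, 0.9], each weight moves halfway
# toward the sample:
#     w_new[i] = w[i] + 0.5 * (sample[i] - w[i])  ->  [0.6, 0.8, 0.25, 0.45]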
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
    '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
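# Added note (a sketch, not part of the original __init__): with this lazy-module
# setup, top-level imports stay cheap; the heavy submodules are loaded on first
# attribute access only, e.g.:
#
#     from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseTokenizer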
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
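# --- Added usage sketch (not part of the original test file) --------------------
# The same checkpoint exercised with its tokenizer; assumes network access to
# download "microsoft/mpnet-base".
if __name__ == "__main__":
    from transformers import MPNetTokenizer

    tokenizer = MPNetTokenizer.from_pretrained("microsoft/mpnet-base")
    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    inputs = tokenizer("MPNet combines masked and permuted language modeling.", return_tensors="pt")
    with torch.no_grad():
        last_hidden_state = model(**inputs).last_hidden_state
    print(last_hidden_state.shape)  # (1, sequence_length, 768)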
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    result = model.predict(x_test)
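    # Added visualization sketch (assumes matplotlib is installed; values are still
    # in the 0-1 scaled units produced by MinMaxScaler above):
    #
    #     import matplotlib.pyplot as plt
    #
    #     plt.plot(y_test[0], label="actual")
    #     plt.plot(result[0], label="predicted")
    #     plt.legend()
    #     plt.show()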
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
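# Worked examples (added for illustration; values follow from the exponent table):
#     length_conversion(4, "meter", "kilometer")  ->  0.004    (factor 10**(0 - 3))
#     length_conversion(3, "km", "m")             ->  3000.0   (factor 10**(3 - 0))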
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups,
            use_bias=False, name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
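# Added note: squeeze-and-excitation in brief -- global-average-pool the feature
# map to (batch, 1, 1, C), squeeze to `reduced_channels` with a ReLU 1x1 conv,
# expand back to C with a sigmoid 1x1 conv, then rescale the input channel-wise:
#
#     pooled = mean(x, axis=(1, 2), keepdims=True)    # (B, 1, 1, C)
#     gate   = sigmoid(conv2(relu(conv1(pooled))))    # (B, 1, 1, C)
#     out    = x * gate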
class TFRegNetXLayer(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    '''simple docstring'''

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
a__ : Optional[Any] = RegNetConfig
def __init__( self , __lowercase , **__lowercase) -> Union[str, Any]:
super().__init__(**__lowercase)
__UpperCamelCase :List[str] = config
__UpperCamelCase :List[Any] = TFRegNetEmbeddings(__lowercase , name='''embedder''')
__UpperCamelCase :Any = TFRegNetEncoder(__lowercase , name='''encoder''')
__UpperCamelCase :Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name='''pooler''')
@unpack_inputs
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase :Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase :Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase :Union[str, Any] = self.embedder(__lowercase , training=__lowercase)
__UpperCamelCase :Any = self.encoder(
__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase)
__UpperCamelCase :List[str] = encoder_outputs[0]
__UpperCamelCase :List[str] = self.pooler(__lowercase)
# Change to NCHW output format have uniformity in the modules
__UpperCamelCase :str = tf.transpose(__lowercase , perm=(0, 3, 1, 2))
__UpperCamelCase :List[Any] = tf.transpose(__lowercase , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase :List[str] = tuple([tf.transpose(__lowercase , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = RegNetConfig
a__ : List[str] = """regnet"""
a__ : Optional[int] = """pixel_values"""
@property
def UpperCamelCase__ ( self) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa)}
__lowercase = r'''
    This model is a TensorFlow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowercase = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
        [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , UpperCAmelCase_ , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self , __lowercase , *__lowercase , **__lowercase) -> List[Any]:
super().__init__(__lowercase , *__lowercase , **__lowercase)
__UpperCamelCase :Tuple = TFRegNetMainLayer(__lowercase , name='''regnet''')
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase :int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase :Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase :List[Any] = self.regnet(
pixel_values=__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , UpperCAmelCase_ , )
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self , __lowercase , *__lowercase , **__lowercase) -> int:
super().__init__(__lowercase , *__lowercase , **__lowercase)
__UpperCamelCase :Optional[Any] = config.num_labels
__UpperCamelCase :str = TFRegNetMainLayer(__lowercase , name='''regnet''')
# classification head
__UpperCamelCase :List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''') if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase__ ( self , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase :Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase :int = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase :Union[str, Any] = self.regnet(
__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase)
__UpperCamelCase :List[str] = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase :str = self.classifier[0](__lowercase)
__UpperCamelCase :List[Any] = self.classifier[1](__lowercase)
__UpperCamelCase :Tuple = None if labels is None else self.hf_compute_loss(labels=__lowercase , logits=__lowercase)
if not return_dict:
__UpperCamelCase :Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states)
| 43 | """simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working, simple example of using Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training; it builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
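# A minimal, self-contained sketch of the core pattern this script showcases
# (illustrative only; the decorated body is a placeholder, not the real
# training loop). `find_executable_batch_size` re-runs the wrapped function
# with a halved batch size whenever it raises an out-of-memory error:
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders and train with `batch_size`
#
#     train()  # called with no arguments; the decorator injects `batch_size`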
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want the padded length to be a round multiple of 8 (16 for fp8)
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1":
_UpperCAmelCase = 2
# Initialize accelerator
_UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as its only parameter,
    # and build the dataloaders in there.
    # It is also wrapped with our decorator.
@find_executable_batch_size(starting_batch_size=lowercase )
def inner_training_loop(lowercase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase )
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
| 289 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Optional[Any] = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = "codegen"
_UpperCamelCase : Tuple = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , a__=50400 , a__=2048 , a__=2048 , a__=4096 , a__=28 , a__=16 , a__=64 , a__=None , a__="gelu_new" , a__=0.0 , a__=0.0 , a__=0.0 , a__=1e-5 , a__=0.0_2 , a__=True , a__=50256 , a__=50256 , a__=False , **a__ , ):
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Optional[Any] = n_ctx
_lowerCAmelCase : Dict = n_positions
_lowerCAmelCase : Any = n_embd
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : List[Any] = n_inner
_lowerCAmelCase : int = rotary_dim
_lowerCAmelCase : List[str] = activation_function
_lowerCAmelCase : List[str] = resid_pdrop
_lowerCAmelCase : Union[str, Any] = embd_pdrop
_lowerCAmelCase : List[str] = attn_pdrop
_lowerCAmelCase : Union[str, Any] = layer_norm_epsilon
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Union[str, Any] = bos_token_id
_lowerCAmelCase : Optional[int] = eos_token_id
super().__init__(
bos_token_id=a__ , eos_token_id=a__ , tie_word_embeddings=a__ , **a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = "default" , a__ = None , a__ = False , ):
super().__init__(a__ , task=a__ , patching_specs=a__ , use_past=a__ )
if not getattr(self._config , """pad_token_id""" , a__ ):
# TODO: how to do that better?
_lowerCAmelCase : List[str] = 0
@property
def __A ( self ):
_lowerCAmelCase : Tuple = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(a__ , direction="""inputs""" )
_lowerCAmelCase : str = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowerCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __A ( self ):
return self._config.n_layer
@property
def __A ( self ):
return self._config.n_head
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
_lowerCAmelCase : Any = super(a__ , self ).generate_dummy_inputs(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        # We need to order the inputs in the way they appear in forward()
_lowerCAmelCase : Optional[int] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Any = seqlen + 2
_lowerCAmelCase : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
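            # one (key, value) pair of zero tensors per decoder layer, each with the shape built above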
_lowerCAmelCase : Any = [
(torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(self.num_layers )
]
_lowerCAmelCase : Union[str, Any] = common_inputs["""attention_mask"""]
if self.use_past:
_lowerCAmelCase : str = ordered_inputs["""attention_mask"""].dtype
_lowerCAmelCase : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
return ordered_inputs
@property
def __A ( self ):
return 13
| 44 | """simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 289 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = len(lowerCAmelCase__ )
__a = sum(lowerCAmelCase__ )
__a = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__a = True
for i in range(1 , s + 1 ):
__a = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__a = dp[i][j - 1]
if arr[i - 1] <= j:
__a = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__a = s - 2 * j
break
return diff
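# Illustrative usage of the minimum subset-sum difference routine above
# (hypothetical driver; note the obfuscated internals reference names such as
# `dp` and `arr` that the renaming has broken, so this is a sketch only):
#
#     print(lowercase([1, 6, 11, 5]))  # best split is {1, 5, 6} vs {11} -> 1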
| 45 | """simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = UNetaDModel
_snake_case : List[str] = 'sample'
@property
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = UNetaDModel
_snake_case : Optional[Any] = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self : Dict ):
return (4, 32, 32)
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : str ):
        # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) )
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[Any] = UNetaDModel
_snake_case : str = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Any ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : List[str] ):
# not required for this model
pass
| 289 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = StableUnCLIPPipeline
_snake_case : str = TEXT_TO_IMAGE_PARAMS
_snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = 32
_UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 289 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> int: # noqa: E741
"""simple docstring"""
_SCREAMING_SNAKE_CASE =len(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =[0] * n
_SCREAMING_SNAKE_CASE =[False] * n
_SCREAMING_SNAKE_CASE =[False] * n
def dfs(_UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] ):
if parent == root:
out_edge_count += 1
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_SCREAMING_SNAKE_CASE =dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_SCREAMING_SNAKE_CASE =min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_SCREAMING_SNAKE_CASE =True
# AP found via cycle
if at == low[to]:
_SCREAMING_SNAKE_CASE =True
else:
_SCREAMING_SNAKE_CASE =min(low[at] , _UpperCamelCase )
return out_edge_count
for i in range(_UpperCamelCase ):
if not visited[i]:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =dfs(_UpperCamelCase , _UpperCamelCase , -1 , _UpperCamelCase )
_SCREAMING_SNAKE_CASE =out_edge_count > 1
for x in range(len(_UpperCamelCase ) ):
if is_art[x] is True:
print(_UpperCamelCase )
# Adjacency list of graph
lowerCamelCase : int = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
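# For the adjacency list above, removing vertex 2, 3 or 5 disconnects the
# graph, so those are the articulation points the routine is meant to print.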
| 47 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 0 |
from __future__ import annotations
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ ) -> None:
lowerCamelCase : List[Any] = order
# a_{0} ... a_{k}
lowerCamelCase : Any = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowerCamelCase : Union[str, Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowerCamelCase : int = [0.0] * self.order
# y[n-1] ... y[n-k]
lowerCamelCase : Union[str, Any] = [0.0] * self.order
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> None:
if len(UpperCamelCase__ ) < self.order:
lowerCamelCase : Dict = [1.0, *a_coeffs]
if len(UpperCamelCase__ ) != self.order + 1:
lowerCamelCase : str = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(UpperCamelCase__ )}'''
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != self.order + 1:
lowerCamelCase : Optional[int] = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(UpperCamelCase__ )}'''
)
raise ValueError(UpperCamelCase__ )
lowerCamelCase : List[Any] = a_coeffs
lowerCamelCase : str = b_coeffs
def _lowercase ( self , UpperCamelCase__ ) -> float:
lowerCamelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowerCamelCase : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowerCamelCase : Dict = self.input_history[:-1]
lowerCamelCase : int = self.output_history[:-1]
lowerCamelCase : List[Any] = sample
lowerCamelCase : Dict = result
return result
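# A minimal usage sketch (hypothetical: `set_coefficients` and `process` stand
# in for the two obfuscated `_lowercase` methods above, the second of which
# shadows the first as written). The class realizes the direct-form-I IIR
# difference equation y[n] = (b0*x[n] + sum_i b_i*x[n-i] - sum_i a_i*y[n-i]) / a0:
#
#     filt = UpperCamelCase__(2)  # second-order section
#     filt.set_coefficients([1.0, -1.6, 0.64], [0.01, 0.02, 0.01])
#     filtered = [filt.process(s) for s in samples]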
| 48 | """simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
__snake_case :Tuple = TypeVar('''T''')
__snake_case :int = Union[List[T], Tuple[T, ...]]
__snake_case :Union[str, Any] = Union[T, List[T], Dict[str, T]]
__snake_case :str = Union[str, bytes, os.PathLike]
| 49 | """simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = get_failure_array(lowercase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern
while i < len(lowercase ):
if pattern[j] == text[i]:
if j == (len(lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase = failure[j - 1]
continue
i += 1
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0]
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while j < len(lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase = failure[i - 1]
continue
j += 1
failure.append(lowercase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 289 | 0 |
from PIL import Image
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Image:
def brightness(_UpperCAmelCase ) -> float:
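        # 128 + level + (c - 128) simplifies to c + level: every channel value is shifted by `level`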
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(_UpperCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
_UpperCAmelCase : Optional[Any] = change_brightness(img, 1_00)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 50 | """simple docstring"""
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
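For example, if a dataset contains 3 truly positive examples and the model labels 2 of them positive, recall is 2/3.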
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
    - **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ):
_UpperCAmelCase = recall_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , )
return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
| 289 | 0 |
import math
def A (__A : float , __A : float ) -> float:
"""simple docstring"""
return math.pow(__A , 2 ) - a
def A (__A : float ) -> float:
"""simple docstring"""
return 2 * x
def A (__A : float ) -> float:
"""simple docstring"""
UpperCAmelCase_ = 2.0
while start <= a:
UpperCAmelCase_ = math.pow(__A , 2 )
return start
def A (__A : float , __A : int = 9999 , __A : float = 0.00_000_000_000_001 ) -> float:
"""simple docstring"""
if a < 0:
raise ValueError('''math domain error''' )
UpperCAmelCase_ = get_initial_point(__A )
for _ in range(__A ):
UpperCAmelCase_ = value
UpperCAmelCase_ = value - fx(__A , __A ) / fx_derivative(__A )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
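    # Worked example (illustrative): for a = 12 the Newton iteration
    # x_{k+1} = x_k - (x_k**2 - 12) / (2 * x_k) converges to ~3.4641016151.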
| 51 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a :
_snake_case : Tuple = PegasusConfig
_snake_case : int = {}
_snake_case : str = 'gelu'
def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 289 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill in each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from the center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
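

# Illustrative check (added; not part of the original script): with theta = 0, gamma = 0
# and psi = 0 both the Gaussian and the cosine peak where _x == 0, so the kernel's
# maximum is exactly 1.0 and the kernel stays square with an odd side length:
#
#   kernel = gabor_filter_kernel(9, 8, 0, 10, 0, 0)
#   assert kernel.shape == (9, 9)
#   assert kernel.max() == 1.0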
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase : Union[str, Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase : Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__lowerCamelCase : Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase : Tuple = out / out.max() * 255
__lowerCamelCase : Optional[Any] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 52 | """simple docstring"""
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
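
# Example (added for illustration):
#   sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]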
| 289 | 0 |
"""Project Euler 25: index of the first Fibonacci term with a given number of digits."""


def fibonacci(n: int) -> int:
    """Compute a Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)
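

# Examples (added for illustration): fibonacci_digits_index(3) == 12, since F(12) = 144
# is the first three-digit Fibonacci number, and solution() == 4782 for the default
# 1000-digit target (the Project Euler 25 answer).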
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 53 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Convert a TAPAS TensorFlow checkpoint to a PyTorch model."""
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
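
    # Example invocation (added; the script name and paths are illustrative placeholders):
    #   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
    #       --tf_checkpoint_path /path/to/model.ckpt --tapas_config_file /path/to/tapas_config.json \
    #       --pytorch_dump_path /path/to/output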
| 289 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number):
    """Return True if 'number' is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
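

# Examples (added for illustration): is_prime(7) -> True, is_prime(8) -> False,
# is_prime(1) -> False (0 and 1 are not prime).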
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__SCREAMING_SNAKE_CASE = list(range(2 , n + 1 ) )
__SCREAMING_SNAKE_CASE = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__SCREAMING_SNAKE_CASE = 0
# filters actual prime numbers.
__SCREAMING_SNAKE_CASE = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
__SCREAMING_SNAKE_CASE = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def prime_factorization(number):
    """Return the prime factorization of 'number' as a list."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__SCREAMING_SNAKE_CASE = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__SCREAMING_SNAKE_CASE = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def is_even(number):
    """Return True if 'number' is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number):
    """Return True if 'number' is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
__SCREAMING_SNAKE_CASE = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__SCREAMING_SNAKE_CASE = get_prime_numbers(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ )
# run variable for while-loops.
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
# exit variable. for break up the loops
__SCREAMING_SNAKE_CASE = True
while i < len_pn and loop:
__SCREAMING_SNAKE_CASE = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__SCREAMING_SNAKE_CASE = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1, number2):
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
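

# Example (added for illustration): gcd(24, 36) == 12 via the Euclidean algorithm.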
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
ans *= n
else:
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__SCREAMING_SNAKE_CASE = p_number_a + 1 # jump to the next number
__SCREAMING_SNAKE_CASE = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
__SCREAMING_SNAKE_CASE = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
__SCREAMING_SNAKE_CASE = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__SCREAMING_SNAKE_CASE = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
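

# Example (added for illustration): factorial(5) == 120.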
def fib(n):
    """Iterative Fibonacci."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
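

# Example (added for illustration): with this implementation's indexing,
# fib(0) == fib(1) == 1 and fib(5) == 8.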
| 54 | """simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size, sequence_length, vocab_size):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 289 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the NAT (Neighborhood Attention Transformer) model."""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
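

# Usage sketch (added for illustration):
#
#   config = NatConfig(depths=[2, 2, 6, 2], embed_dim=64)
#   assert config.num_layers == 4
#   assert config.hidden_size == 64 * 2**3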
| 55 | """simple docstring"""
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
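

# Examples (added for illustration): solve(13, 2) == 1, since 13 = 2**2 + 3**2 is the only
# way, and solve(100, 2) == 3 (100 = 10**2 = 6**2 + 8**2 = 1**2 + 3**2 + 4**2 + 5**2 + 7**2).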
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
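

# Usage sketch (added; the restored class name is an assumption based on the
# LeViT-style (256 / 224) shortest-edge resize rule):
#
#   processor = LevitImageProcessor()
#   batch = processor(images=pil_image, return_tensors="np")
#   # batch["pixel_values"] has shape (1, 3, 224, 224)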
| 56 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = model_classes[model_name](
include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,)
_UpperCAmelCase = original_model.trainable_variables
_UpperCAmelCase = original_model.non_trainable_variables
_UpperCAmelCase = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_UpperCAmelCase = param.numpy()
_UpperCAmelCase = list(tf_params.keys() )
# Load HuggingFace model
_UpperCAmelCase = get_efficientnet_config(lowercase )
_UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval()
_UpperCAmelCase = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
_UpperCAmelCase = rename_keys(lowercase )
replace_params(lowercase ,lowercase ,lowercase )
# Initialize preprocessor and preprocess input image
_UpperCAmelCase = convert_image_processor(lowercase )
_UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
_UpperCAmelCase = hf_model(**lowercase )
_UpperCAmelCase = outputs.logits.detach().numpy()
# Original model inference
_UpperCAmelCase = False
_UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
_UpperCAmelCase = image.img_to_array(lowercase )
_UpperCAmelCase = np.expand_dims(lowercase ,axis=0 )
_UpperCAmelCase = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
_UpperCAmelCase = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
UpperCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 289 | 0 |
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase = update_area_of_max_square(_UpperCamelCase , col + 1 )
__lowerCAmelCase = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
__lowerCAmelCase = 1 + min([right, diagonal, down] )
__lowerCAmelCase = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
__lowerCAmelCase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
__lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
__lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
__lowerCAmelCase = 1 + min([right, diagonal, down] )
__lowerCAmelCase = max(largest_square_area[0] , _UpperCamelCase )
__lowerCAmelCase = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase = [0]
__lowerCAmelCase = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase = dp_array[row][col + 1]
__lowerCAmelCase = dp_array[row + 1][col + 1]
__lowerCAmelCase = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = max(dp_array[row][col] , _UpperCamelCase )
else:
__lowerCAmelCase = 0
return largest_square_area
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [0] * (cols + 1)
__lowerCAmelCase = [0] * (cols + 1)
__lowerCAmelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase = current_row[col + 1]
__lowerCAmelCase = next_row[col + 1]
__lowerCAmelCase = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = max(current_row[col] , _UpperCamelCase )
else:
__lowerCAmelCase = 0
__lowerCAmelCase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
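# Worked example for the bottom-up recurrence above: with mat = [[1, 1], [1, 1]],
# the dp table (side of the largest square whose top-left corner sits at each cell,
# padded with a zero row and column) is filled bottom-to-top, right-to-left:
#
#     dp = [[2, 1, 0],
#           [1, 1, 0],
#           [0, 0, 0]]
#
# dp[0][0] = 1 + min(dp[0][1], dp[1][1], dp[1][0]) = 2, so the call above prints 2.
# Note these functions return the side length of the largest square, not its area.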
| 57 | """simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class a :
def __init__( self : Union[str, Any] ):
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ):
if nodea not in self.connections:
self.add_node(__lowerCAmelCase )
if nodea not in self.connections:
self.add_node(__lowerCAmelCase )
_UpperCAmelCase = probability
def lowerCAmelCase_ ( self : Optional[Any] ):
return list(self.connections )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = 0
_UpperCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = Counter(graph.get_nodes() )
_UpperCAmelCase = start
for _ in range(lowercase ):
_UpperCAmelCase = graph.transition(lowercase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
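# Usage sketch, assuming the pre-obfuscation names MarkovChainGraphUndirectedUnweighted
# (the class above) and get_transitions (the driver), with argument order
# (start, transitions, steps). Outgoing probabilities from each node should sum to 1
# so the cumulative sampling in transition() is well defined.
#
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     visited = get_transitions("a", transitions, 5000)  # Counter of node visit counts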
| 289 | 0 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCamelCase ( __lowerCamelCase : Tuple ) ->Tuple:
_SCREAMING_SNAKE_CASE = fname.split(os.path.sep )[-1]
return re.search(R"""^(.*)_\d+\.jpg$""" , __lowerCamelCase ).groups()[0]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A , A=None , A=None ) -> int:
_SCREAMING_SNAKE_CASE = file_names
_SCREAMING_SNAKE_CASE = image_transform
_SCREAMING_SNAKE_CASE = label_to_id
def __len__( self ) -> Optional[Any]:
return len(self.file_names )
def __getitem__( self , A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.file_names[idx]
_SCREAMING_SNAKE_CASE = PIL.Image.open(A )
_SCREAMING_SNAKE_CASE = raw_image.convert("""RGB""" )
if self.image_transform is not None:
_SCREAMING_SNAKE_CASE = self.image_transform(A )
_SCREAMING_SNAKE_CASE = extract_label(A )
if self.label_to_id is not None:
_SCREAMING_SNAKE_CASE = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ) ->str:
# Initialize accelerator
if args.with_tracking:
_SCREAMING_SNAKE_CASE = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
_SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE = config["""lr"""]
_SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] )
_SCREAMING_SNAKE_CASE = int(config["""seed"""] )
_SCREAMING_SNAKE_CASE = int(config["""batch_size"""] )
_SCREAMING_SNAKE_CASE = config["""image_size"""]
if not isinstance(__lowerCamelCase , (list, tuple) ):
_SCREAMING_SNAKE_CASE = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
_SCREAMING_SNAKE_CASE = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_SCREAMING_SNAKE_CASE = int(args.checkpointing_steps )
else:
raise ValueError(
F'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
_SCREAMING_SNAKE_CASE = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_SCREAMING_SNAKE_CASE = os.path.split(__lowerCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(__lowerCamelCase , __lowerCamelCase )
# Grab all the image filenames
_SCREAMING_SNAKE_CASE = [os.path.join(args.data_dir , __lowerCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
_SCREAMING_SNAKE_CASE = [extract_label(__lowerCamelCase ) for fname in file_names]
_SCREAMING_SNAKE_CASE = list(set(__lowerCamelCase ) )
id_to_label.sort()
_SCREAMING_SNAKE_CASE = {lbl: i for i, lbl in enumerate(__lowerCamelCase )}
# Set the seed before splitting the data.
np.random.seed(__lowerCamelCase )
torch.manual_seed(__lowerCamelCase )
torch.cuda.manual_seed_all(__lowerCamelCase )
# Split our filenames between train and validation
_SCREAMING_SNAKE_CASE = np.random.permutation(len(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = int(0.8 * len(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = random_perm[:cut]
_SCREAMING_SNAKE_CASE = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_SCREAMING_SNAKE_CASE = Compose([RandomResizedCrop(__lowerCamelCase , scale=(0.5, 1.0) ), ToTensor()] )
_SCREAMING_SNAKE_CASE = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase )
# For evaluation, we use a deterministic Resize
_SCREAMING_SNAKE_CASE = Compose([Resize(__lowerCamelCase ), ToTensor()] )
_SCREAMING_SNAKE_CASE = PetsDataset([file_names[i] for i in eval_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 )
_SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE = create_model("""resnet50d""" , pretrained=__lowerCamelCase , num_classes=len(__lowerCamelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_SCREAMING_SNAKE_CASE = False
for param in model.get_classifier().parameters():
_SCREAMING_SNAKE_CASE = True
# We normalize the batches of images to be a bit faster.
_SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
_SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_SCREAMING_SNAKE_CASE = OneCycleLR(optimizer=__lowerCamelCase , max_lr=__lowerCamelCase , epochs=__lowerCamelCase , steps_per_epoch=len(__lowerCamelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
_SCREAMING_SNAKE_CASE = 0
# We also need to keep track of the starting epoch so files are named properly
_SCREAMING_SNAKE_CASE = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(F'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
_SCREAMING_SNAKE_CASE = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_SCREAMING_SNAKE_CASE = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_SCREAMING_SNAKE_CASE = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_SCREAMING_SNAKE_CASE = os.path.splitext(__lowerCamelCase )[0]
if "epoch" in training_difference:
_SCREAMING_SNAKE_CASE = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
_SCREAMING_SNAKE_CASE = None
else:
_SCREAMING_SNAKE_CASE = int(training_difference.replace("""step_""" , """""" ) )
_SCREAMING_SNAKE_CASE = resume_step // len(__lowerCamelCase )
resume_step -= starting_epoch * len(__lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase , __lowerCamelCase ):
model.train()
if args.with_tracking:
_SCREAMING_SNAKE_CASE = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_SCREAMING_SNAKE_CASE = accelerator.skip_first_batches(__lowerCamelCase , __lowerCamelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_SCREAMING_SNAKE_CASE = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()}
_SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.nn.functional.cross_entropy(__lowerCamelCase , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = F'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase )
accelerator.save_state(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()}
_SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = outputs.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
_SCREAMING_SNAKE_CASE = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_SCREAMING_SNAKE_CASE = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}: {100 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(__lowerCamelCase ),
"""epoch""": epoch,
} , step=__lowerCamelCase , )
if checkpointing_steps == "epoch":
_SCREAMING_SNAKE_CASE = F'epoch_{epoch}'
if args.output_dir is not None:
_SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase )
accelerator.save_state(__lowerCamelCase )
if args.with_tracking:
accelerator.end_training()
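# Worked example of the resume arithmetic above: if the latest checkpoint folder is
# "step_250" and the train dataloader yields 100 batches per epoch, then
# resume_step = 250, starting_epoch = 250 // 100 = 2, and the remaining
# resume_step = 250 - 2 * 100 = 50 batches are skipped via
# accelerator.skip_first_batches before training continues inside epoch 2.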
def lowerCamelCase ( ) ->int:
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=__lowerCamelCase , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCamelCase , default=__lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=__lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=__lowerCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 58 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=True , ):
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
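# do_flip_channel_order controls swapping RGB input to BGR, which the original
# MobileViT checkpoints appear to expect.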
_UpperCAmelCase = do_flip_channel_order
def lowerCAmelCase_ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_flip_channel_order""" ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 289 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : List[List[List[str]]] , snake_case__ : List[List[str]] , snake_case__ : int = 1 , snake_case__ : int = 4 , ) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case__ , hypotheses=snake_case__ , min_len=snake_case__ , max_len=snake_case__ )
}
| 59 | """simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'efficientnet'
def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = width_coefficient
_UpperCAmelCase = depth_coefficient
_UpperCAmelCase = depth_divisor
_UpperCAmelCase = kernel_sizes
_UpperCAmelCase = in_channels
_UpperCAmelCase = out_channels
_UpperCAmelCase = depthwise_padding
_UpperCAmelCase = strides
_UpperCAmelCase = num_block_repeats
_UpperCAmelCase = expand_ratios
_UpperCAmelCase = squeeze_expansion_ratio
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = pooling_type
_UpperCAmelCase = initializer_range
_UpperCAmelCase = batch_norm_eps
_UpperCAmelCase = batch_norm_momentum
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = drop_connect_rate
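# The factor of 4 below appears to count the four sub-stages of every block
# (expansion, depthwise conv, squeeze-and-excite, projection), matching the
# per-block parameter groups in the conversion mapping earlier in this document.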
_UpperCAmelCase = sum(__lowerCAmelCase ) * 4
class a ( lowerCAmelCase_ ):
_snake_case : Dict = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : int ):
return 1e-5
| 289 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : list ):
for i in range(len(_snake_case ) - 1 , 0 , -1 ):
lowerCAmelCase : int = False
for j in range(_snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCAmelCase, lowerCAmelCase : Tuple = unsorted[j - 1], unsorted[j]
lowerCAmelCase : Optional[Any] = True
for j in range(_snake_case ):
if unsorted[j] > unsorted[j + 1]:
lowerCAmelCase, lowerCAmelCase : Any = unsorted[j + 1], unsorted[j]
lowerCAmelCase : int = True
if not swapped:
break
return unsorted
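# The cocktail shaker pattern: each outer iteration runs a backward pass that bubbles
# small elements toward the front and a forward pass that bubbles large elements
# toward the back, stopping early once a full iteration makes no swap. Expected
# behaviour, per the upstream implementation:
#     cocktail_shaker_sort([4, 5, 2, 1, 2])    ->  [1, 2, 2, 4, 5]
#     cocktail_shaker_sort([-4, 5, 0, 1, -2])  ->  [-4, -2, 0, 1, 5]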
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ : str = [int(item) for item in user_input.split(''',''')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 60 | """simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_hidden_groups
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Union[str, Any] ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ):
_UpperCAmelCase = AlbertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
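# For multiple choice, every input tensor is duplicated once per candidate answer:
# (batch, seq_len) -> (batch, num_choices, seq_len).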
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_snake_case : Tuple = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : Dict = True
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = AlbertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AlbertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = AlbertModel.from_pretrained("""albert-base-v2""" )
_UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
| 289 | 0 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """align_text_model"""
def __init__( self , lowercase_=3_0522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=0 , lowercase_="absolute" , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Tuple = position_embedding_type
UpperCAmelCase_ : Dict = use_cache
UpperCAmelCase_ : Optional[int] = pad_token_id
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : int = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
UpperCAmelCase_ : Union[str, Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """align_vision_model"""
def __init__( self , lowercase_ = 3 , lowercase_ = 600 , lowercase_ = 2.0 , lowercase_ = 3.1 , lowercase_ = 8 , lowercase_ = [3, 3, 5, 3, 5, 5, 3] , lowercase_ = [32, 16, 24, 40, 80, 112, 192] , lowercase_ = [16, 24, 40, 80, 112, 192, 320] , lowercase_ = [] , lowercase_ = [1, 2, 2, 2, 1, 2, 1] , lowercase_ = [1, 2, 2, 3, 3, 4, 1] , lowercase_ = [1, 6, 6, 6, 6, 6, 6] , lowercase_ = 0.25 , lowercase_ = "swish" , lowercase_ = 2560 , lowercase_ = "mean" , lowercase_ = 0.02 , lowercase_ = 0.0_01 , lowercase_ = 0.99 , lowercase_ = 0.2 , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Any = width_coefficient
UpperCAmelCase_ : Dict = depth_coefficient
UpperCAmelCase_ : Union[str, Any] = depth_divisor
UpperCAmelCase_ : int = kernel_sizes
UpperCAmelCase_ : Dict = in_channels
UpperCAmelCase_ : Tuple = out_channels
UpperCAmelCase_ : Optional[int] = depthwise_padding
UpperCAmelCase_ : Dict = strides
UpperCAmelCase_ : Tuple = num_block_repeats
UpperCAmelCase_ : Tuple = expand_ratios
UpperCAmelCase_ : Dict = squeeze_expansion_ratio
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dim
UpperCAmelCase_ : List[Any] = pooling_type
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : str = batch_norm_eps
UpperCAmelCase_ : List[str] = batch_norm_momentum
UpperCAmelCase_ : Any = drop_connect_rate
UpperCAmelCase_ : str = sum(lowercase_ ) * 4
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
UpperCAmelCase_ : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """align"""
SCREAMING_SNAKE_CASE__ : str = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=640 , lowercase_=1.0 , lowercase_=0.02 , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
if text_config is None:
UpperCAmelCase_ : Union[str, Any] = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ : Tuple = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
UpperCAmelCase_ : Tuple = AlignTextConfig(**lowercase_ )
UpperCAmelCase_ : str = AlignVisionConfig(**lowercase_ )
UpperCAmelCase_ : List[Any] = projection_dim
UpperCAmelCase_ : Union[str, Any] = temperature_init_value
UpperCAmelCase_ : Union[str, Any] = initializer_range
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Dict = self.text_config.to_dict()
UpperCAmelCase_ : str = self.vision_config.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
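# A usage sketch for composing the three configurations above; the classes correspond
# to transformers' AlignTextConfig / AlignVisionConfig / AlignConfig (all rendered as
# `A_` in this dump), so the canonical imports are assumed here.
def _align_config_sketch():
    from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

    text_config = AlignTextConfig()      # BERT-like text tower defaults
    vision_config = AlignVisionConfig()  # EfficientNet-like vision tower defaults
    config = AlignConfig.from_text_vision_configs(text_config, vision_config)
    assert config.projection_dim == 640  # default projection dim, per __init__ above
    return config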
| 61 | """simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if the sink t is reachable from s in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run Ford-Fulkerson and return the edges saturated by a maximum flow."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
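# Hedged addition (not part of the original module): by the max-flow/min-cut
# theorem, the saturated-edge search above is driven by a maximum flow, whose
# value for this classic CLRS network is 23. Note that `mincut` mutates the
# graph it is given into the final residual network, so the check below
# rebuilds the capacities instead of reusing the (now mutated) `test_graph`.
def max_flow(graph, source, sink):
    graph = [row[:] for row in graph]  # work on a copy, keep capacities intact
    parent = [-1] * len(graph)
    flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        v = sink
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return flow


if __name__ == "__main__":
    fresh_graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    assert max_flow(fresh_graph, 0, 5) == 23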
| 289 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=4 , ) -> Union[str, Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_attention_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_choices
def _a ( self ) -> Dict:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_attention_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _a ( self ) -> Tuple:
__UpperCamelCase =FlaxAlbertModelTester(self )
@slow
def _a ( self ) -> str:
for model_class_name in self.all_model_classes:
__UpperCamelCase =model_class_name.from_pretrained('albert-base-v2' )
__UpperCamelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(A_ )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> str:
__UpperCamelCase =FlaxAlbertModel.from_pretrained('albert-base-v2' )
__UpperCamelCase =np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCamelCase =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCamelCase =model(A_ , attention_mask=A_ )[0]
__UpperCamelCase =(1, 11, 768)
self.assertEqual(output.shape , A_ )
__UpperCamelCase =np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , A_ , atol=1E-4 ) )
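# Hedged sketch (added; assumed helper semantics): `ids_tensor` and
# `random_attention_mask` used by the tester above produce random token ids and
# a 0/1 mask that always attends to at least one position, roughly like these
# numpy stand-ins.
def _ids_tensor_sketch(shape, vocab_size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.integers(0, vocab_size, size=shape, dtype=np.int64)


def _random_attention_mask_sketch(shape, seed=0):
    mask = np.random.default_rng(seed).integers(0, 2, size=shape, dtype=np.int64)
    mask[:, -1] = 1  # guarantee at least one attended token per row
    return mask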
| 62 | """simple docstring"""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list, sample: list) -> int:
        """Compute the winning cluster: the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The smaller squared Euclidean distance wins.
        return 0 if d0 < d1 else 1

    def update(self, weights: list, sample: list, j: int, alpha: float) -> list:
        """Pull the winning weight vector towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
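# Hedged aside (not in the original file): the winner search above generalizes
# to any number of clusters via numpy's argmin over squared Euclidean distances.
import numpy as np


def get_winner_vectorized(weights: list, sample: list) -> int:
    w = np.asarray(weights, dtype=float)  # shape (n_clusters, n_features)
    s = np.asarray(sample, dtype=float)   # shape (n_features,)
    return int(np.argmin(((w - s) ** 2).sum(axis=1)))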
| 289 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : Optional[int] ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__a ):
_a = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_a = FlaxAutoModel.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def UpperCamelCase__ ( self : int ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__a ):
_a = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_a = FlaxAutoModel.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_a = AutoTokenizer.from_pretrained(__a )
_a = FlaxBertModel.from_pretrained(__a )
_a = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__a : Optional[Any] ):
return model(**__a )
eval(**__a ).block_until_ready()
@slow
def UpperCamelCase__ ( self : Dict ):
for model_name in ["roberta-base", "roberta-large"]:
_a = AutoTokenizer.from_pretrained(__a )
_a = FlaxRobertaModel.from_pretrained(__a )
_a = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__a : str ):
return model(**__a )
eval(**__a ).block_until_ready()
def UpperCamelCase__ ( self : Any ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = FlaxAutoModel.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : int ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _a = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
def UpperCamelCase__ ( self : Dict ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
_a = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : str ):
with self.assertRaisesRegex(__a , "Use `from_pt=True` to load this model" ):
_a = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
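# Hedged aside (added): jax dispatches work asynchronously, so the tests above
# call `.block_until_ready()` to force the jitted forward pass to actually run
# (and surface any failure) inside the test body. A minimal illustration:
if __name__ == "__main__":
    import jax.numpy as jnp

    @jax.jit
    def _double_tanh(x):
        return jnp.tanh(x) * 2.0

    _double_tanh(jnp.ones((2, 2))).block_until_ready()  # force execution now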
| 63 | """simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCAmelCase_ ( self : Union[str, Any] ):
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = MPNetModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MPNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_snake_case : List[Any] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_snake_case : Union[str, Any] = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : int = False
_snake_case : List[Any] = True
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MPNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase )
@require_torch
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
_UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
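# Hedged illustration (added) of the multiple-choice reshaping used above: a
# (batch, seq) id tensor is expanded to (batch, num_choices, seq) so that every
# choice shares the same tokens in this synthetic setup.
if __name__ == "__main__":
    _ids = torch.arange(6).reshape(2, 3)  # (batch=2, seq=3)
    _expanded = _ids.unsqueeze(1).expand(-1, 4, -1).contiguous()
    assert _expanded.shape == (2, 4, 3)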
| 289 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = """bert-base-cased"""
_snake_case : Tuple = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : int = TFAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = """bert-base-cased"""
_snake_case : Any = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : Tuple = TFAutoModelForPreTraining.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(a_ )
_snake_case , _snake_case : Dict = TFAutoModelForCausalLM.from_pretrained(a_, output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Tuple = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(a_ )
_snake_case , _snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(a_, output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(a_ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(a_, output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_snake_case : Dict = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
@slow
@require_tensorflow_probability
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_snake_case : int = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
_snake_case : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(a_ )
_snake_case , _snake_case : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
a_, output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(a_, a_ )
self.assertEqual(model.num_parameters(), 14_410 )
self.assertEqual(model.num_parameters(only_trainable=a_ ), 14_410 )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(a_, a_ )
self.assertEqual(model.num_parameters(), 14_410 )
self.assertEqual(model.num_parameters(only_trainable=a_ ), 14_410 )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(a_, a_ )
_snake_case : Any = copy.deepcopy(model.config )
_snake_case : Dict = ["""FunnelBaseModel"""]
_snake_case : Optional[Any] = TFAutoModel.from_config(a_ )
self.assertIsInstance(a_, a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ )
_snake_case : List[str] = TFAutoModel.from_pretrained(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
try:
AutoConfig.register("""new-model""", a_ )
_snake_case : Union[str, Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(a_ ):
auto_class.register(a_, a_ )
auto_class.register(a_, a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
auto_class.register(a_, a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_snake_case : List[Any] = BertModelTester(self ).get_config()
_snake_case : Optional[int] = NewModelConfig(**tiny_config.to_dict() )
_snake_case : List[str] = auto_class.from_config(a_ )
self.assertIsInstance(a_, a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ )
_snake_case : Optional[Any] = auto_class.from_pretrained(a_ )
self.assertIsInstance(a_, a_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
a_, """bert-base is not a local folder and is not a valid model identifier""" ):
_snake_case : List[Any] = TFAutoModel.from_pretrained("""bert-base""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
a_, r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            _snake_case : int = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="""aaaaaa""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
a_, """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""", ):
_snake_case : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(a_, """Use `from_pt=True` to load this model""" ):
_snake_case : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_snake_case : Any = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
# With a sharded checkpoint
_snake_case : Dict = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
_snake_case : Optional[Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
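# Hedged sketch (added) of the register pattern exercised above; it assumes the
# public `transformers` Auto API, and `MyConfig` is a hypothetical toy config.
if __name__ == "__main__":
    from transformers import TFAutoModel as _TFAutoModel, TFBertModel as _TFBertModel

    class MyConfig(BertConfig):
        model_type = "my-new-model"

    AutoConfig.register("my-new-model", MyConfig)
    _TFAutoModel.register(MyConfig, _TFBertModel)

    tiny = MyConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=37)
    model = _TFAutoModel.from_config(tiny)  # resolves through the registered mapping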
| 64 | """simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units that differ only by a power of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
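    # Hedged usage examples (added): both conversions below are exact powers of
    # ten, so the float comparisons are safe.
    assert length_conversion(4, "kilometer", "meter") == 4000
    assert length_conversion(1, "gigametre", "megametre") == 1000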
| 289 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '


def get_pairs(word) -> set:
    '''
    Return the set of adjacent symbol pairs in a word, where a word is a tuple
    of variable-length string symbols.
    '''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8" ) as merges_handle:
                merges = merges_handle.read().split("\n" )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size(self ) -> int:
        return len(self.decoder )

    def get_vocab(self ) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder )

    def bpe(self, token ) -> str:
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )

        word = " ".join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES, "" )

        word = word.replace(" ", BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word

    def _tokenize(self, text ):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(" " ) ) )

        return split_tokens

    def _convert_token_to_id(self, token: str ) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index: int ) -> str:
        result = self.decoder.get(index, self.unk_token )
        return result

    def convert_tokens_to_string(self, tokens: List[str] ) -> str:
        string = " ".join(tokens )
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB ) )
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1

        return (vocab_file, merges_file)
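# Hedged toy walk-through (added; hypothetical merge table): with ranks for
# ("l", "o") and then ("lo", "w</w>"), the BPE loop above collapses "low" into
# a single symbol. The subclass only stubs out file loading for the demo.
if __name__ == "__main__":

    class _ToyTokenizer(Speech2Text2Tokenizer):
        def __init__(self):
            self.bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
            self.cache = {}

    print(_ToyTokenizer().bpe("low"))  # -> "low" (fully merged, "</w>" stripped)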
| 65 | """simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1":
_UpperCAmelCase = 2
# Initialize accelerator
_UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase )
def inner_training_loop(lowercase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
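# Hedged aside (added): `find_executable_batch_size` retries the decorated
# function with a halved batch size whenever it raises an out-of-memory error.
# A rough sketch of that behavior (the real decorator in `accelerate` inspects
# CUDA/XLA/CPU OOM messages more carefully than this string match):
def _find_executable_batch_size_sketch(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as err:
                if "out of memory" not in str(err).lower():
                    raise
                batch_size //= 2  # OOM: retry with half the batch
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper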
| 289 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    '''Replace the last `occurrence` matches of `old` in `s` with `new`.'''
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    '''Sum all parameters; encoder.embeddings are double copied in the original FLAVA checkpoint.'''
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    '''Rename DALL-E encoder keys to the Hugging Face FlavaImageCodebook layout.'''
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"""{group_key}.""", f"""{group_key}.group.""")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    '''Load a DALL-E encoder checkpoint and convert it into a FlavaImageCodebook.'''
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # Parameter sums must match (up to float tolerance) after the key renaming.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
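    # Hedged usage note (added): `rreplace` swaps only the last `occurrence`
    # matches, so suffix renames like ".w" -> ".weight" never touch earlier dots.
    assert rreplace("blocks.w.w", ".w", ".weight", 1) == "blocks.w.weight"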
| 66 | """simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 289 | 0 |
'''simple docstring'''
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Prepare a GHZ state on `qubits` qubits and return measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )

    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )

    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
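    # Hedged note (added): a GHZ state collapses only to all-zeros or all-ones,
    # so the counts dictionary should contain (roughly equally) just those keys.
    counts = quantum_entanglement(2)
    assert set(counts) <= {"00", "11"}  # Bell pair: only correlated outcomes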
| 67 | """simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class UNet2DModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
_snake_case : Optional[int] = UNetaDModel
_snake_case : List[str] = 'sample'
@property
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
_snake_case : int = UNetaDModel
_snake_case : Optional[Any] = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self : Dict ):
return (4, 32, 32)
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : str ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) )
class NCSNppModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
_snake_case : Optional[Any] = UNetaDModel
_snake_case : str = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Any ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : List[str] ):
# not required for this model
pass
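# A minimal sketch of the reproducible-noise pattern the UNet tests above rely on:
# passing a freshly seeded generator to torch.randn makes the sample deterministic,
# so two runs can be compared with a tolerance check. The shape (1, 3, 32, 32) here
# is an illustrative assumption, not a value taken from the tests.
import torch

noise_a = torch.randn(1, 3, 32, 32, generator=torch.manual_seed(0))
noise_b = torch.randn(1, 3, 32, 32, generator=torch.manual_seed(0))
assert torch.allclose(noise_a, noise_b)  # same seed, same sample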
| 289 | 0 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> list:
'''simple docstring'''
A__ = int(SCREAMING_SNAKE_CASE_ )
if n_element < 1:
A__ = ValueError("n_element should be a positive number" )
raise my_error
A__ = [1]
A__ , A__ , A__ = (0, 0, 0)
A__ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase__ = hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 68 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = StableUnCLIPPipeline
_snake_case : str = TEXT_TO_IMAGE_PARAMS
_snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = 32
_UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
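# A minimal sketch of the peak-memory check used just above: reset CUDA's
# peak-memory counters, run the workload, then read back the high-water mark.
# `run` is a placeholder for any callable workload, and the 7 GB budget simply
# mirrors the assertion in the test.
import torch

def peak_memory_bytes(run) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    run()
    return torch.cuda.max_memory_allocated()

# e.g., assuming a CUDA device and a `pipe` like the one above:
# assert peak_memory_bytes(lambda: pipe("anime turtle", num_inference_steps=2)) < 7 * 10**9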
| 289 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "linear"
SCREAMING_SNAKE_CASE_ = "cosine"
SCREAMING_SNAKE_CASE_ = "cosine_with_restarts"
SCREAMING_SNAKE_CASE_ = "polynomial"
SCREAMING_SNAKE_CASE_ = "constant"
SCREAMING_SNAKE_CASE_ = "constant_with_warmup"
SCREAMING_SNAKE_CASE_ = "piecewise_constant"
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = -1 ) -> Optional[Any]:
return LambdaLR(UpperCAmelCase , lambda UpperCAmelCase : 1 , last_epoch=UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = -1 ) -> Optional[Any]:
def lr_lambda(UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(UpperCAmelCase ) / float(max(1.0 , UpperCAmelCase ) )
return 1.0
return LambdaLR(UpperCAmelCase , UpperCAmelCase , last_epoch=UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = -1 ) -> List[Any]:
snake_case_ = {}
snake_case_ = step_rules.split(',' )
for rule_str in rule_list[:-1]:
snake_case_ , snake_case_ = rule_str.split(':' )
snake_case_ = int(UpperCAmelCase )
snake_case_ = float(UpperCAmelCase )
snake_case_ = value
snake_case_ = float(rule_list[-1] )
def create_rules_function(UpperCAmelCase , UpperCAmelCase ):
def rule_func(UpperCAmelCase ) -> float:
snake_case_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
snake_case_ = create_rules_function(UpperCAmelCase , UpperCAmelCase )
return LambdaLR(UpperCAmelCase , UpperCAmelCase , last_epoch=UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=-1 ) -> Optional[int]:
def lr_lambda(UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(UpperCAmelCase ) / float(max(1 , UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.5 , UpperCAmelCase = -1 ) -> Optional[Any]:
def lr_lambda(UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(UpperCAmelCase ) / float(max(1 , UpperCAmelCase ) )
snake_case_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = -1 ) -> Optional[int]:
def lr_lambda(UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(UpperCAmelCase ) / float(max(1 , UpperCAmelCase ) )
snake_case_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1e-7 , UpperCAmelCase=1.0 , UpperCAmelCase=-1 ) -> List[str]:
snake_case_ = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(UpperCAmelCase ) / float(max(1 , UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
snake_case_ = lr_init - lr_end
snake_case_ = num_training_steps - num_warmup_steps
snake_case_ = 1 - (current_step - num_warmup_steps) / decay_steps
snake_case_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__UpperCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 1.0 , UpperCAmelCase = -1 , ) -> Union[str, Any]:
snake_case_ = SchedulerType(UpperCAmelCase )
snake_case_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(UpperCAmelCase , last_epoch=UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(UpperCAmelCase , step_rules=UpperCAmelCase , last_epoch=UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(UpperCAmelCase , num_warmup_steps=UpperCAmelCase , last_epoch=UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
UpperCAmelCase , num_warmup_steps=UpperCAmelCase , num_training_steps=UpperCAmelCase , num_cycles=UpperCAmelCase , last_epoch=UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
UpperCAmelCase , num_warmup_steps=UpperCAmelCase , num_training_steps=UpperCAmelCase , power=UpperCAmelCase , last_epoch=UpperCAmelCase , )
return schedule_func(
UpperCAmelCase , num_warmup_steps=UpperCAmelCase , num_training_steps=UpperCAmelCase , last_epoch=UpperCAmelCase )
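# A small usage sketch for the schedule helpers above, assuming they are exposed
# under the names listed in TYPE_TO_SCHEDULER_FUNCTION (the defs themselves are
# obfuscated in this row). LambdaLR multiplies the base lr by the returned factor,
# so the lr ramps up for 10 steps and then decays linearly toward zero.
import torch
from torch.optim import SGD

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = SGD(params, lr=0.1)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    optimizer.step()
    scheduler.step()
assert scheduler.get_last_lr()[0] <= 0.1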
| 69 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 0 |
'''simple docstring'''
import math
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if (
not isinstance(lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if (
not isinstance(lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
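# A worked example for the two helpers above: with 100 VA apparent power and a
# power factor of 0.8, real power is 100 * 0.8 = 80 W and reactive power is
# 100 * sqrt(1 - 0.8**2) = 100 * 0.6 = 60 VAR (the classic 3-4-5 triangle).
import math

apparent, pf = 100.0, 0.8
real_power = apparent * pf
reactive_power = apparent * math.sqrt(1 - pf**2)
assert round(real_power, 9) == 80.0
assert round(reactive_power, 9) == 60.0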
| 70 | """simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Tuple =KandinskyVaaControlnetPipeline
UpperCamelCase__ : Dict =["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase__ : int =["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase__ : List[str] =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase__ : Tuple =False
@property
def __lowercase ( self ):
"""simple docstring"""
return 32
@property
def __lowercase ( self ):
"""simple docstring"""
return 32
@property
def __lowercase ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def __lowercase ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowercase ( self ):
"""simple docstring"""
return 100
@property
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Tuple ={
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase : Optional[Any] =UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def __lowercase ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : List[Any] =VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.dummy_unet
__UpperCamelCase : List[Any] =self.dummy_movq
__UpperCamelCase : int =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase__ , )
__UpperCamelCase : Optional[int] ={
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : List[str] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create hint
__UpperCamelCase : List[str] =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Optional[int] =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : List[Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple ={
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str ='cpu'
__UpperCamelCase : Tuple =self.get_dummy_components()
__UpperCamelCase : Any =self.pipeline_class(**lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
__UpperCamelCase : Union[str, Any] =output.images
__UpperCamelCase : Optional[Any] =pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
__UpperCamelCase : Tuple =image[0, -3:, -3:, -1]
__UpperCamelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : List[Any] =np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
__UpperCamelCase : Union[str, Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
__UpperCamelCase : Any =torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
__UpperCamelCase : int =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__UpperCamelCase : Dict =KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
__UpperCamelCase : Dict =KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
__UpperCamelCase : List[Any] =pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Tuple ='A robot, 4k photo'
__UpperCamelCase : Optional[int] =torch.Generator(device='cuda' ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase : List[str] =pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__UpperCamelCase : Any =torch.Generator(device='cuda' ).manual_seed(0 )
__UpperCamelCase : str =pipeline(
image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , hint=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , output_type='np' , )
__UpperCamelCase : Optional[int] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
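# A minimal sketch of the hint preprocessing used in the slow test above: a PIL
# image becomes a float tensor in [0, 1] and is reordered from HWC to NCHW.
# Image.new stands in for the downloaded depth hint, purely for illustration.
import numpy as np
import torch
from PIL import Image

img = Image.new("RGB", (64, 64))
hint = torch.from_numpy(np.array(img)).float() / 255.0  # (H, W, C) in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)                # (1, C, H, W)
assert hint.shape == (1, 3, 64, 64)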
| 71 | """simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = get_failure_array(lowercase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern
while i < len(lowercase ):
if pattern[j] == text[i]:
if j == (len(lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase = failure[j - 1]
continue
i += 1
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0]
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while j < len(lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase = failure[i - 1]
continue
j += 1
failure.append(lowercase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
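# A worked note on Test 5, assuming the defs above answer to the names their own
# tests use (kmp, get_failure_array): failure[8] == 2 because the final 'a' cannot
# extend the length-5 border "aabaa" (the next pattern char is 'b'), so the
# computation falls back via failure[4] = 2 (mismatch again) and failure[1] = 1,
# where pattern[1] = 'a' finally matches, giving a border of length 2.
assert get_failure_array("aabaabaaa")[8] == 2
assert kmp("aba", "xxabay")       # pattern occurs in the text
assert not kmp("aba", "xxabxay")  # and here it does not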
| 289 | 0 |
"""simple docstring"""
import pprint
import requests
lowerCAmelCase__ = '''https://zenquotes.io/api'''
def snake_case_ ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case_ ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
lowerCAmelCase__ = random_quotes()
pprint.pprint(response)
| 72 | """simple docstring"""
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ):
_UpperCAmelCase = recall_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , )
return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
| 289 | 0 |
from typing import List
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : List[Any] = {key: len(lowerCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(lowerCamelCase__ , lowerCamelCase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F"\t- key {key} has length {length}" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
__lowerCamelCase : Union[str, Any] = max(lists_lengths.values() , default=0 )
return max(1 , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[range]:
__lowerCamelCase : Tuple = []
for group_idx in range(lowerCamelCase__ ):
__lowerCamelCase : List[str] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowerCamelCase : Tuple = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowerCamelCase : str = range(lowerCamelCase__ , start + num_shards_to_add )
shards_indices_per_group.append(lowerCamelCase__ )
return shards_indices_per_group
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[dict]:
__lowerCamelCase : Optional[Any] = _number_of_shards_in_gen_kwargs(lowerCamelCase__ )
if num_shards == 1:
return [dict(lowerCamelCase__ )]
else:
__lowerCamelCase : str = _distribute_shards(num_shards=lowerCamelCase__ , max_num_jobs=lowerCamelCase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , lowerCamelCase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> dict:
__lowerCamelCase : Tuple = {len(lowerCamelCase__ ) for value in gen_kwargs.values() if isinstance(lowerCamelCase__ , lowerCamelCase__ )}
__lowerCamelCase : Optional[int] = {}
for size in list_sizes:
__lowerCamelCase : int = list(range(lowerCamelCase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowerCamelCase : List[str] = dict(lowerCamelCase__ )
for key, value in shuffled_kwargs.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : str = [value[i] for i in indices_per_size[len(lowerCamelCase__ )]]
return shuffled_kwargs
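# A worked example of the shard-distribution logic above, assuming the defs are
# callable under the internal names they reference (_distribute_shards): 7 shards
# across at most 3 jobs split into contiguous ranges of sizes 3, 2 and 2, with the
# remainder going to the earliest groups.
assert _distribute_shards(num_shards=7, max_num_jobs=3) == [range(0, 3), range(3, 5), range(5, 7)]
assert _distribute_shards(num_shards=2, max_num_jobs=5) == [range(0, 1), range(1, 2)]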
| 73 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a :
_snake_case : Tuple = PegasusConfig
_snake_case : int = {}
_snake_case : str = 'gelu'
def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase , _UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ):
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase , _UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,):
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_snake_case : Optional[Any] = True
_snake_case : List[str] = False
_snake_case : Dict = False
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_class(__lowerCAmelCase )
@jax.jit
def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ):
return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
return model.decode(
decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase )
_UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
assert tgt_text == decoded
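# A minimal sketch of the jit-vs-eager comparison pattern in the tests above: the
# same function runs under jax.jit and with jit disabled, and the outputs are
# expected to agree in shape (and, up to numerics, in value). The function f is
# an arbitrary stand-in, not taken from the test.
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return jnp.tanh(x) * 2.0

x = jnp.ones((2, 3))
with jax.disable_jit():
    eager = f(x)
jitted = f(x)
assert jitted.shape == eager.shape == (2, 3)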
| 289 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int ):
A = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
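# A quick sanity check of the closed form above: with first term a = 1, common
# difference d = 1 and n = 10 terms, S = n/2 * (2a + (n-1)d) = 5 * (2 + 9) = 55,
# the familiar sum 1 + 2 + ... + 10.
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == 55.0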
def _snake_case ( ):
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | """simple docstring"""
import math
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = 2
_UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment
_UpperCAmelCase = [True] * (end + 1)
_UpperCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(lowercase )
for i in range(start * start ,end + 1 ,lowercase ):
_UpperCAmelCase = False
start += 1
prime += in_prime
_UpperCAmelCase = end + 1
_UpperCAmelCase = min(2 * end ,lowercase )
while low <= n:
_UpperCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_UpperCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowercase ,high + 1 ,lowercase ):
_UpperCAmelCase = False
for j in range(len(lowercase ) ):
if temp[j] is True:
prime.append(j + low )
_UpperCAmelCase = high + 1
_UpperCAmelCase = min(high + end ,lowercase )
return prime
print(sieve(1_0**6))
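# A small cross-check sketch for the segmented sieve above, assuming it is callable
# under the name its own demo uses (sieve): for a modest bound its output should
# match a plain one-pass Sieve of Eratosthenes.
def simple_sieve(n: int) -> list:
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for m in range(p * p, n + 1, p):
                is_prime[m] = False
    return [p for p in range(n + 1) if is_prime[p]]

assert sieve(100) == simple_sieve(100)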
| 289 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase, '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase, '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase, '''num_attention_heads''' ) )
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=640, lowerCAmelCase=4, lowerCAmelCase="silu", lowerCAmelCase=3, lowerCAmelCase=32, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0_2, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=10, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =last_hidden_size
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =conv_kernel_size
lowerCamelCase_ =output_stride
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =classifier_dropout_prob
lowerCamelCase_ =use_labels
lowerCamelCase_ =is_training
lowerCamelCase_ =num_labels
lowerCamelCase_ =initializer_range
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowerCamelCase_ =self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self ):
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =MobileViTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =MobileViTForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =MobileViTForSemanticSegmentation(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
lowerCamelCase_ ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase : List[str] =(
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase : int =False
lowercase : str =False
lowercase : Optional[Any] =False
lowercase : Union[str, Any] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTModelTester(self )
lowerCamelCase_ =MobileViTConfigTester(self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ =[*signature.parameters.keys()]
lowerCamelCase_ =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
lowerCamelCase_ =outputs.hidden_states
lowerCamelCase_ =5
self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase_ =2
for i in range(len(lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =True
check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ =True
check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =MobileViTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def a_ ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(lowerCAmelCase )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase )
# verify the logits
lowerCamelCase_ =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =model.to(lowerCAmelCase )
lowerCamelCase_ =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase )
lowerCamelCase_ =outputs.logits
# verify the logits
lowerCamelCase_ =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
], device=lowerCAmelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCAmelCase, atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =model.to(lowerCAmelCase )
lowerCamelCase_ =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase )
lowerCamelCase_ =outputs.logits.detach().cpu()
lowerCamelCase_ =image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase, target_sizes=[(50, 60)] )
lowerCamelCase_ =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, lowerCAmelCase )
lowerCamelCase_ =image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase )
lowerCamelCase_ =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, lowerCAmelCase )
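The integration tests above drive the model and image processor by hand; the same checkpoint can also be exercised through the high-level `pipeline` API. A sketch (the task name and checkpoint are standard, but this exact call is not part of the test suite):

from transformers import pipeline

classifier = pipeline("image-classification", model="apple/mobilevit-xx-small")
print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])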
| 75 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_UpperCAmelCase = TapasConfig.from_json_file(lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_46_94
_UpperCAmelCase = 0.20_79_51
_UpperCAmelCase = 0.12_11_94
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.0_35_25_13
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.45_19
_UpperCAmelCase = 0.90_34_21
_UpperCAmelCase = 2_22.0_88
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_31_41
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=lowercase )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 )
tokenizer.save_pretrained(lowercase )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 289 | 0 |
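A hypothetical invocation of the conversion entry point defined above, passing arguments positionally the way the `__main__` block does; every path here is a placeholder, not a real checkpoint:

convert_tf_checkpoint_to_pytorch(
    "WTQ",  # task
    True,  # reset_position_index_per_cell, i.e. use relative position embeddings
    "checkpoints/tapas_wtq/model.ckpt",  # tf_checkpoint_path (placeholder)
    "checkpoints/tapas_wtq/bert_config.json",  # tapas_config_file (placeholder)
    "tapas_wtq_pytorch",  # pytorch_dump_path (placeholder)
)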
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , a : Optional[int] , a : str=13 , a : str=7 , a : List[Any]=True , a : List[str]=True , a : int=True , a : Any=True , a : Tuple=99 , a : int=32 , a : Union[str, Any]=5 , a : str=4 , a : Optional[Any]=37 , a : Optional[Any]="gelu" , a : Any=0.1 , a : Optional[Any]=0.1 , a : Any=512 , a : int=16 , a : Optional[int]=2 , a : Optional[int]=0.02 , a : str=4 , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_attention_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = num_choices
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =True
lowerCamelCase__ =(
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = FlaxRoFormerModelTester(self )
@slow
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=a )
SCREAMING_SNAKE_CASE : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
SCREAMING_SNAKE_CASE : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Tuple = model(a )[0]
SCREAMING_SNAKE_CASE : Optional[int] = 5_0000
SCREAMING_SNAKE_CASE : Any = (1, 6, vocab_size)
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Any = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , a , atol=1e-4 ) ) | 76 | """simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
def run_func(lowercase ):
@wraps(lowercase )
def run_in_eager_mode(*lowercase ,**lowercase ):
return func(*lowercase ,**lowercase )
@wraps(lowercase )
@tf.function(experimental_compile=lowercase )
def run_in_graph_mode(*lowercase ,**lowercase ):
return func(*lowercase ,**lowercase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = random.Random()
_UpperCAmelCase = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowercase ,shape=(batch_size, sequence_length) ,dtype=tf.intaa )
class a ( lowerCAmelCase_ ):
_snake_case : TensorFlowBenchmarkArguments
_snake_case : PretrainedConfig
_snake_case : str = "TensorFlow"
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return tf.__version__
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
# initialize GPU on separate process
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return self._measure_speed(_inference )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return self._measure_speed(_train )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase )
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_UpperCAmelCase = self._prepare_inference_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return self._measure_memory(_inference )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCAmelCase )
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_UpperCAmelCase = self._prepare_train_func(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return self._measure_memory(_train )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_UpperCAmelCase = (
hasattr(__lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , __lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_cls(__lowerCAmelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
_UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__lowerCAmelCase , training=__lowerCAmelCase )
_UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_UpperCAmelCase = (
hasattr(__lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , __lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_cls(__lowerCAmelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
_UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0]
_UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0]
_UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables )
return gradients
_UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(__lowerCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_UpperCAmelCase = timeit.repeat(
__lowerCAmelCase , repeat=self.args.repeat , number=10 , )
return min(__lowerCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Callable[[], None] ):
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_UpperCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_UpperCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__lowerCAmelCase )
_UpperCAmelCase = meminfo.used
_UpperCAmelCase = Memory(__lowerCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_UpperCAmelCase = None
else:
_UpperCAmelCase = measure_peak_memory_cpu(__lowerCAmelCase )
_UpperCAmelCase = Memory(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_UpperCAmelCase = stop_memory_tracing(__lowerCAmelCase )
if memory is None:
_UpperCAmelCase = summary.total
else:
_UpperCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 289 | 0 |
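The decorator factory at the top of this file switches a callable between eager execution and a compiled `tf.function`. A standalone sketch of the same idea, using the current `jit_compile` flag in place of the deprecated `experimental_compile`:

from functools import wraps

import tensorflow as tf


def with_graph_mode(use_xla: bool):
    # Returns a decorator that wraps `func` in a (optionally XLA-compiled) tf.function
    def decorator(func):
        @wraps(func)
        @tf.function(jit_compile=use_xla)
        def wrapped(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapped

    return decorator


@with_graph_mode(use_xla=False)
def square_sum(x):
    return tf.reduce_sum(x * x)


print(square_sum(tf.constant([1.0, 2.0, 3.0])).numpy())  # 14.0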
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Tuple = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Dict = AlbertTokenizer
lowerCamelCase__ : str = AlbertTokenizerFast
lowerCamelCase__ : Any = True
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Dict = True
def _UpperCAmelCase ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : Any = AlbertTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = 'this is a test'
lowercase__ : Dict = 'this is a test'
return input_text, output_text
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Union[str, Any] = '<pad>'
lowercase__ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(a ) , 3_0_0_0_0 )
def _UpperCAmelCase ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer()
lowercase__ : List[Any] = 'I was born in 92000, and this is falsé.'
lowercase__ : Union[str, Any] = tokenizer.tokenize(a )
lowercase__ : List[str] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowercase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
lowercase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowercase__ : Optional[Any] = self.get_rust_tokenizer()
lowercase__ : Union[str, Any] = tokenizer.encode(a )
lowercase__ : List[str] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = AlbertTokenizer(a , keep_accents=a )
lowercase__ : Optional[int] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [4_8, 2_5, 2_1, 1_2_8_9] )
lowercase__ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
lowercase__ : List[Any] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = AlbertTokenizer(a )
lowercase__ : Any = tokenizer.encode('sequence builders' )
lowercase__ : Dict = tokenizer.encode('multi-sequence build' )
lowercase__ : str = tokenizer.build_inputs_with_special_tokens(a )
lowercase__ : Dict = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _UpperCAmelCase ( self ) -> Dict:
# fmt: off
lowercase__ : Tuple = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 77 | """simple docstring"""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Count the ways to write needed_sum as a sum of distinct natural powers."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 289 | 0 |
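A worked example for the solver above (function names as in the cleaned-up snippet): 13 = 2**2 + 3**2 is the only way to write 13 as a sum of distinct squares, so the count is 1.

print(solve(13, 2))  # 1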
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( *lowercase_ :Tuple , **lowercase_ :Optional[int] ) -> int:
pass
@is_pipeline_test
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self :Tuple , lowercase_ :int , lowercase_ :str , lowercase_ :Union[str, Any] ) -> Dict:
UpperCAmelCase = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
UpperCAmelCase = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Tuple , lowercase_ :Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = vqa_pipeline(lowercase_ , top_k=1 )
self.assertEqual(
lowercase_ , [
[{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}],
[{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}],
] , )
@require_torch
def UpperCAmelCase__ ( self :Dict ) -> Optional[Any]:
UpperCAmelCase = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
UpperCAmelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
UpperCAmelCase = 'How many cats are there?'
UpperCAmelCase = vqa_pipeline(image=lowercase_ , question='How many cats are there?' , top_k=2 )
self.assertEqual(
lowercase_ , [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}] )
UpperCAmelCase = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
lowercase_ , [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self :str ) -> Optional[Any]:
UpperCAmelCase = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
UpperCAmelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
UpperCAmelCase = 'How many cats are there?'
UpperCAmelCase = vqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
UpperCAmelCase = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
UpperCAmelCase = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def UpperCAmelCase__ ( self :Tuple ) -> str:
pass
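For reference, the slow test above can be reproduced directly against the public checkpoint; a sketch using the same fixture image and question (the assertions above expect the top answer '2' with score ≈ 0.88):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
print(vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png", question="How many cats are there?", top_k=2))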
| 78 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
UpperCAmelCase__ = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = EfficientNetConfig()
_UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""]
_UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""]
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = 10_00
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] ,do_center_crop=lowercase ,)
return preprocessor
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
_UpperCAmelCase = sorted(set(lowercase ) )
_UpperCAmelCase = len(lowercase )
_UpperCAmelCase = {b: str(lowercase ) for b, i in zip(lowercase ,range(lowercase ) )}
_UpperCAmelCase = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
_UpperCAmelCase = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
_UpperCAmelCase = {}
for item in rename_keys:
if item[0] in original_param_names:
_UpperCAmelCase = """efficientnet.""" + item[1]
_UpperCAmelCase = """classifier.weight"""
_UpperCAmelCase = """classifier.bias"""
return key_mapping
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
_UpperCAmelCase = key_mapping[key]
if "_conv" in key and "kernel" in key:
_UpperCAmelCase = torch.from_numpy(lowercase ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
_UpperCAmelCase = torch.from_numpy(lowercase ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
_UpperCAmelCase = torch.from_numpy(np.transpose(lowercase ) )
else:
_UpperCAmelCase = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = model_classes[model_name](
include_top=lowercase ,weights="""imagenet""" ,input_tensor=lowercase ,input_shape=lowercase ,pooling=lowercase ,classes=10_00 ,classifier_activation="""softmax""" ,)
_UpperCAmelCase = original_model.trainable_variables
_UpperCAmelCase = original_model.non_trainable_variables
_UpperCAmelCase = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_UpperCAmelCase = param.numpy()
_UpperCAmelCase = list(tf_params.keys() )
# Load HuggingFace model
_UpperCAmelCase = get_efficientnet_config(lowercase )
_UpperCAmelCase = EfficientNetForImageClassification(lowercase ).eval()
_UpperCAmelCase = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
_UpperCAmelCase = rename_keys(lowercase )
replace_params(lowercase ,lowercase ,lowercase )
# Initialize preprocessor and preprocess input image
_UpperCAmelCase = convert_image_processor(lowercase )
_UpperCAmelCase = preprocessor(images=prepare_img() ,return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
_UpperCAmelCase = hf_model(**lowercase )
_UpperCAmelCase = outputs.logits.detach().numpy()
# Original model inference
_UpperCAmelCase = False
_UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
_UpperCAmelCase = image.img_to_array(lowercase )
_UpperCAmelCase = np.expand_dims(lowercase ,axis=0 )
_UpperCAmelCase = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase ,lowercase ,atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
_UpperCAmelCase = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
UpperCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 289 | 0 |
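The key layout change inside `replace_params` above is reordering convolution kernels: TF Keras stores them as (height, width, in_channels, out_channels), while PyTorch expects (out_channels, in_channels, height, width), hence `permute(3, 2, 0, 1)`. A tiny demonstration:

import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 32, 64).astype(np.float32)  # (H, W, C_in, C_out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # (C_out, C_in, H, W)
print(pt_kernel.shape)  # torch.Size([64, 32, 3, 3])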
"""MVP model configuration."""

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            ) | 79 | """simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """A graph whose edges carry transition probabilities for a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str):
        # Sample the next node proportionally to the outgoing probabilities.
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    """Run the chain for `steps` transitions and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
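def _demo() -> None:
    # Illustrative only: a made-up two-state weather chain, not part of the
    # original module. Each tuple is (source, destination, probability).
    transitions = [
        ("sunny", "sunny", 0.9),
        ("sunny", "rainy", 0.1),
        ("rainy", "rainy", 0.5),
        ("rainy", "sunny", 0.5),
    ]
    # The stationary distribution is ~5/6 sunny, ~1/6 rainy, so "sunny" should
    # dominate the visit counts after 1000 steps.
    print(get_transitions("sunny", transitions, 1000))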
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A color is valid for a vertex if no already-colored neighbour uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertices from `index` onwards."""
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring with at most `max_colors` colors, or []."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
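def _demo() -> None:
    # Illustrative input (not from the original module): an even cycle on four
    # vertices is 2-colorable, so a valid assignment such as [0, 1, 0, 1] exists.
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 2))  # [0, 1, 0, 1]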
| 80 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
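# To run only this module (illustrative command; the exact path depends on where
# the file lives in the repository):
#   python -m pytest tests/models/mobilevit/test_image_processing_mobilevit.py -q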
| 289 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training model."""

    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."})
    save_dir: Optional[str] = field(default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."})
    dataset_name_train: Optional[str] = field(default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."})
    dataset_name_valid: Optional[str] = field(default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."})
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule type."})
    num_warmup_steps: Optional[int] = field(default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."})
    gradient_accumulation_steps: Optional[int] = field(default=16, metadata={"help": "Number of gradient accumulation steps."})
    gradient_checkpointing: Optional[bool] = field(default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."})
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(default=1024, metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."})
    resume_from_checkpoint: Optional[str] = field(default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."})
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating model."""

    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."})
    dataset_name: Optional[str] = field(default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."})
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval dataset."""

    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."})
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(default=None, metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."})
    do_sample: Optional[bool] = field(default=True, metadata={"help": "Sample from the language model's output distribution."})
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(default=200, metadata={"help": "Number of completions to generate for each sample."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(default="eval_results.json", metadata={"help": "File where the evaluation results are saved."})
    HF_ALLOW_CODE_EVAL: Optional[str] = field(default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"})
    device_int: Optional[int] = field(default=-1, metadata={"help": ("Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive number corresponds to which GPU device id to run on.")})


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing data."""

    num_workers: Optional[int] = field(default=None, metadata={"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."})
    dataset_name: Optional[str] = field(default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."})
    output_dir: Optional[str] = field(default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."})
    samples_per_file: Optional[int] = field(default=100000, metadata={"help": "Number of files to save per JSON output file."})
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."})
    line_mean: Optional[float] = field(default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."})
    alpha_frac: Optional[float] = field(default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."})
    min_token_ratio: Optional[float] = field(default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."})
    filter_proba: Optional[float] = field(default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."})
    tokenizer: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."})
    near_deduplication: Optional[bool] = field(default=False, metadata={"help": "If True, near-duplicate samples are removed."})
    jaccard_threshold: Optional[float] = field(default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."})


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."})
    dataset_name: Optional[str] = field(default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."})
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(default=32768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."})
    dataset_name: Optional[str] = field(default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."})
    tokenized_data_repo: Optional[str] = field(default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."})
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    """Configuration for initializing new model."""

    config_name: Optional[str] = field(default="gpt2-large", metadata={"help": "Configuration to use for model initialization."})
    tokenizer_name: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."})
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
| 81 | """simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
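# Illustrative usage (comment-only sketch): the model depth is derived in
# __init__ above from the per-stage block repeats:
#     config = EfficientNetConfig()
#     assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # 64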
| 289 | 0 |
A__ = """Input must be a string of 8 numbers plus letter"""
A__ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case , snake_case ):
_lowerCAmelCase = F'Expected string as input, found {type(snake_case ).__name__}'
raise TypeError(snake_case )
_lowerCAmelCase = spanish_id.replace("""-""" , """""" ).upper()
if len(snake_case ) != 9:
raise ValueError(snake_case )
try:
_lowerCAmelCase = int(spanish_id_clean[0:8] )
_lowerCAmelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case ) from ex
if letter.isdigit():
raise ValueError(snake_case )
return letter == LOOKUP_LETTERS[number % 23]
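def _demo() -> None:
    # Illustrative check using the textbook DNI 12345678Z:
    # 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".
    print(is_spain_national_id("12345678Z"))   # True
    print(is_spain_national_id("12345678-Z"))  # hyphens are stripped, also True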
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | """simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 289 | 0 |
'''simple docstring'''
from __future__ import annotations
def all_unique(elements: list) -> bool:
    # A sequence has all-unique items iff converting it to a set loses nothing.
    return len(set(elements)) == len(elements)
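def _demo() -> None:
    # Quick illustration: duplicates collapse in the set, shrinking its length.
    print(all_unique([1, 2, 3]))  # True
    print(all_unique("aab"))      # False - strings are sequences of characters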
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | """simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Ford-Fulkerson on the residual graph; saturated edges form the minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))  # expected: [(1, 3), (4, 3), (4, 5)]
| 289 | 0 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder").T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder").T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
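# Example invocation (illustrative script name and paths only):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention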
| 84 | """simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: the cluster whose weights are closest (Euclidean) to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 <= d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Move every component of the winning vector `j` towards the sample by learning rate `alpha`."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 289 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
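# Note: the module object is swapped for a _LazyModule so that the heavy
# torch/TF submodules listed above are only imported when one of their
# attributes is first accessed.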
| 85 | """simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 289 | 0 |
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum: two running sums modulo 65521, packed into 32 bits."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
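def _demo() -> None:
    # Cross-check against the standard library: the checksum of "Wikipedia" is
    # the documented reference value 0x11E60398.
    import zlib
    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398
    print(hex(adler32("Wikipedia")))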
UpperCAmelCase__ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
UpperCAmelCase__ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent_difference = 1
    if from_exponent > to_exponent:
        exponent_difference = from_exponent - to_exponent
    else:
        exponent_difference = -(to_exponent - from_exponent)
    return value * pow(10, exponent_difference)
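
# A quick worked check of the function as fixed above (assumed values):
# kilometer -> meter shifts the exponent by 3 - 0, i.e. multiplies by 10**3.
assert length_conversion(4, "kilometer", "meter") == 4000.0
assert length_conversion(1, "meter", "kilometer") == 0.001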
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
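

# A small usage sketch for the helper above (hypothetical tensors): when only
# `config`, `input_ids` and `decoder_input_ids` are given, the attention masks
# are derived from `config.pad_token_id` and the head masks default to ones.
#
#   inputs = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
#   model(**inputs)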
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and the rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
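
    # The check above exercises the KV-cache contract: running only the new
    # tokens with `past_key_values` must match re-running the concatenated
    # sequence. A minimal sketch of the same idea (hypothetical tensors):
    #
    #   full = model(torch.cat([ids, new_ids], -1), attention_mask=mask).last_hidden_state
    #   fast = model(new_ids, attention_mask=mask, past_key_values=past).last_hidden_state
    #   assert torch.allclose(full[:, -new_ids.shape[1]:], fast, atol=1e-2)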
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 87 | """simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
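

# For example, with `pad_to_multiple_of=8` a batch whose longest sentence has
# 61 tokens is padded up to 64; fp16/bf16 matmuls on such multiples map onto
# Tensor-Core-friendly shapes (a heuristic used by this example, not a hard rule).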
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
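
    # New Code #
    # A rough sketch of the retry loop the decorator implements (illustrative
    # only -- the real logic lives in `accelerate.utils`):
    #
    #   batch_size = starting_batch_size
    #   while batch_size > 0:
    #       try:
    #           return inner_training_loop(batch_size)
    #       except RuntimeError:     # e.g. CUDA out of memory
    #           batch_size //= 2     # halve the batch size and try again
    #   raise RuntimeError("No executable batch size found.")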
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 289 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    '''simple docstring'''

    def test_sorted(self):
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")

    def test_negative_weight_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, """Weight can not be negative.""")

    def test_negative_profit_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, """Profit can not be negative.""")

    def test_null_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")

    def test_unequal_list_length(self):
        """simple docstring"""
        self.assertRaisesRegex(
            IndexError, """The length of profit and weight must be same.""")
if __name__ == "__main__":
unittest.main()
| 88 | """simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
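
# The import recommended by the warning above:
#   from accelerate import find_executable_batch_size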
| 289 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
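

# A minimal usage sketch (assuming the config as fixed above; `backbone_config`
# falls back to a default ResNet config when omitted):
#   config = UperNetConfig(hidden_size=512)
#   config.backbone_config.model_type  # -> "resnet"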
| 89 | """simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = 'sample'
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetaDLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self : Dict ):
return (4, 32, 32)
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : str ):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
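
        # The comparison above contrasts the two loading paths: with
        # `low_cpu_mem_usage=True` (the accelerate path) weights are
        # materialized straight from the checkpoint instead of first being
        # randomly initialized and then overwritten, so peak CPU memory stays
        # near one copy of the model -- and the outputs must match either way.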
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) )
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Any ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : List[str] ):
# not required for this model
pass
| 289 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1_000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCamelCase__ , layer_scale_init_value=1e-5 , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = SwiftFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = SwiftFormerForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__lowerCamelCase = SwiftFormerForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = SwiftFormerModelTester(self )
__lowerCamelCase = ConfigTester(
self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = SwiftFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCamelCase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
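
            # e.g. with image_size=224 the recorded shapes shrink as
            # (224 // 4) // 2**(i // 2) -> 56, 56, 28, 28, 14, 14, 7, 7.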
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
def _config_zero_init(lowerCamelCase__ ):
__lowerCamelCase = copy.deepcopy(lowerCamelCase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCamelCase__ , lowerCamelCase__ , 1e-10 )
if isinstance(getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ):
__lowerCamelCase = _config_zero_init(getattr(lowerCamelCase__ , lowerCamelCase__ ) )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return configs_no_init
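
        # `_config_zero_init` (above) shrinks every `*_range`/`*_std`/`layer_scale`
        # field to ~1e-10, so freshly initialized weights are expected to be
        # exactly 0.0 or 1.0 -- which is what the loop below asserts.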
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 90 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
def lowerCAmelCase_ ( self : Optional[int] ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
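        # note: 7 * 10**9 bytes is ~6.5 GiB; `max_memory_allocated` reports bytes.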
| 289 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size)
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : ImageInput , lowercase_ : Optional[bool] = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[float] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : int = get_size_dict(lowercase_ , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : List[Any] = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [to_numpy_array(lowercase_) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Tuple = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : int = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
SCREAMING_SNAKE_CASE_ : Any = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
SCREAMING_SNAKE_CASE_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
| 91 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()
def _a ( ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("-f" )
__lowerCAmelCase = parser.parse_args()
return args.f
class a__ ( snake_case__ ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(_A , "argv" , _A ):
__lowerCAmelCase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_A , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(_A )
__lowerCAmelCase = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(_A )
__lowerCAmelCase = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(_A )
| 92 | """simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 0 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 22 ):
"""simple docstring"""
lowercase_ : Tuple = range(1 , __SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = range(1 , __SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(1_0, 2_2) = }""")
| 93 | """simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = get_failure_array(lowercase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern
while i < len(lowercase ):
if pattern[j] == text[i]:
if j == (len(lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase = failure[j - 1]
continue
i += 1
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0]
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while j < len(lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase = failure[i - 1]
continue
j += 1
failure.append(lowercase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 289 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
snake_case : Optional[Any] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , ):
a :Optional[int] = [file for file in os.listdir(_lowerCamelCase ) if os.path.isfile(os.path.join(_lowerCamelCase , _lowerCamelCase ) )]
if identifier is not None:
a :Optional[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for n_ in n_identifier:
a :Optional[Any] = [file for file in files if n_ not in file]
else:
a :Optional[int] = [file for file in files if n_identifier not in file]
a :List[Any] = ignore_files or []
ignore_files.append('''__init__.py''' )
a :Union[str, Any] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , _lowerCamelCase )
if only_modules:
a :Dict = file.split('''.''' )[0]
try:
a :int = getattr(_lowerCamelCase , _lowerCamelCase )
a :List[Any] = doctest.DocTestSuite(_lowerCamelCase )
a :int = unittest.TextTestRunner().run(_lowerCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
a :Any = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = Path('''src/transformers''' )
a :Any = '''modeling'''
a :Optional[Any] = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase , ignore_files=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = Path('''src/transformers''' )
a :Any = '''tokenization'''
self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = Path('''src/transformers''' )
a :Union[str, Any] = '''configuration'''
self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = Path('''src/transformers''' )
a :Any = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(_lowerCamelCase , n_identifier=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = Path('''docs/source''' )
a :int = ['''favicon.ico''']
self.analyze_directory(_lowerCamelCase , ignore_files=_lowerCamelCase , only_modules=_lowerCamelCase )
| 94 | """simple docstring"""
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ):
_UpperCAmelCase = recall_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , )
return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
| 289 | 0 |
UpperCAmelCase : Optional[int] = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 95 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a :
_snake_case : Tuple = PegasusConfig
_snake_case : int = {}
_snake_case : str = 'gelu'
def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase , _UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ):
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase , _UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_UpperCAmelCase = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ,):
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(lowercase ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_snake_case : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_snake_case : Optional[Any] = True
_snake_case : List[str] = False
_snake_case : Dict = False
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_class(__lowerCAmelCase )
@jax.jit
def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ):
return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
return model.decode(
decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase )
_UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
assert tgt_text == decoded
| 289 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyInpaintPipeline
lowerCamelCase__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
lowerCamelCase__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
lowerCamelCase__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowerCamelCase__ = False
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return self.time_input_dim
@property
def A_ ( self ):
return self.time_input_dim * 4
@property
def A_ ( self ):
return 100
@property
def A_ ( self ):
_lowerCamelCase : Tuple = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCamelCase : str = MultilingualCLIP(lowercase )
_lowerCamelCase : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**lowercase )
return model
@property
def A_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.dummy_text_encoder
_lowerCamelCase : Optional[Any] = self.dummy_tokenizer
_lowerCamelCase : Optional[int] = self.dummy_unet
_lowerCamelCase : Tuple = self.dummy_movq
_lowerCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
_lowerCamelCase : List[str] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A_ ( self , lowercase , lowercase=0 ):
_lowerCamelCase : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase )
# create init_image
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : List[str] = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCamelCase : Tuple = np.ones((64, 64) , dtype=np.floataa )
_lowerCamelCase : Optional[int] = 0
if str(lowercase ).startswith('mps' ):
_lowerCamelCase : List[Any] = torch.manual_seed(lowercase )
else:
_lowerCamelCase : List[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
_lowerCamelCase : str = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A_ ( self ):
_lowerCamelCase : List[Any] = 'cpu'
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : List[str] = self.pipeline_class(**lowercase )
_lowerCamelCase : Optional[int] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : int = pipe(**self.get_dummy_inputs(lowercase ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : Dict = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
_lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : List[Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ):
_lowerCamelCase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
_lowerCamelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCamelCase : str = np.ones((768, 768) , dtype=np.floataa )
_lowerCamelCase : Any = 0
_lowerCamelCase : Any = 'a hat'
_lowerCamelCase : Optional[int] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
_lowerCamelCase : List[Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
_lowerCamelCase : Optional[int] = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCamelCase : int = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase , lowercase ) | 96 | """simple docstring"""
import math
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = 2
_UpperCAmelCase = int(math.sqrt(lowercase ) ) # Size of every segment
_UpperCAmelCase = [True] * (end + 1)
_UpperCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(lowercase )
for i in range(start * start ,end + 1 ,lowercase ):
_UpperCAmelCase = False
start += 1
prime += in_prime
_UpperCAmelCase = end + 1
_UpperCAmelCase = min(2 * end ,lowercase )
while low <= n:
_UpperCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_UpperCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowercase ,high + 1 ,lowercase ):
_UpperCAmelCase = False
for j in range(len(lowercase ) ):
if temp[j] is True:
prime.append(j + low )
_UpperCAmelCase = high + 1
_UpperCAmelCase = min(high + end ,lowercase )
return prime
print(sieve(1_0**6))
| 289 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def a ( ) -> Generator[int, None, None]:
'''simple docstring'''
UpperCamelCase__ :dict[int, int] = {}
UpperCamelCase__ :Tuple = 2
while True:
UpperCamelCase__ :str = factor_map.pop(__a , __a )
if factor:
UpperCamelCase__ :List[str] = factor + prime
while x in factor_map:
x += factor
UpperCamelCase__ :Optional[Any] = factor
else:
UpperCamelCase__ :List[str] = prime
yield prime
prime += 1
def a ( __a = 1e10 ) -> int:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = sieve()
UpperCamelCase__ :str = 1
while True:
UpperCamelCase__ :Optional[Any] = next(__a )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__a )
n += 2
if __name__ == "__main__":
print(solution()) | 97 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_UpperCAmelCase = TapasConfig.from_json_file(lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_46_94
_UpperCAmelCase = 0.20_79_51
_UpperCAmelCase = 0.12_11_94
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.0_35_25_13
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.45_19
_UpperCAmelCase = 0.90_34_21
_UpperCAmelCase = 2_22.0_88
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_31_41
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=lowercase )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 )
tokenizer.save_pretrained(lowercase )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 289 | 0 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a_ ( lowerCamelCase ): # picklable for multiprocessing
return x.sum()
def a_ ( lowerCamelCase ): # picklable for multiprocessing
return i + 1
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = 42
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 1
UpperCAmelCase__ = [1, 2]
UpperCAmelCase__ = {'a': 1, 'b': 2}
UpperCAmelCase__ = {'a': [1, 2], 'b': [3, 4]}
UpperCAmelCase__ = {'a': {'1': 1}, 'b': 2}
UpperCAmelCase__ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 2
UpperCAmelCase__ = [2, 3]
UpperCAmelCase__ = {'a': 2, 'b': 3}
UpperCAmelCase__ = {'a': [2, 3], 'b': [4, 5]}
UpperCAmelCase__ = {'a': {'1': 2}, 'b': 3}
UpperCAmelCase__ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ) ,lowerCamelCase__ )
UpperCAmelCase__ = 2
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
UpperCAmelCase__ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
UpperCAmelCase__ = {'a': 2, 'b': 0, 'c': 2}
UpperCAmelCase__ = {
'a': np.eye(2 ).astype(lowerCamelCase__ ),
'b': np.zeros(3 ).astype(lowerCamelCase__ ),
'c': np.ones(2 ).astype(lowerCamelCase__ ),
}
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,)
self.assertEqual(map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ,num_proc=lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase__ ,lowerCamelCase__ ,map_numpy=lowerCamelCase__ ,num_proc=lowerCamelCase__ ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,)
with self.assertRaises(lowerCamelCase__ ): # can't pickle a local lambda
map_nested(lambda lowerCamelCase__ : x + 1 ,lowerCamelCase__ ,num_proc=lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = {'a': 1, 'b': 2}
UpperCAmelCase__ = {'a': 3, 'b': 4}
UpperCAmelCase__ = {'a': 5, 'b': 6}
UpperCAmelCase__ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
class snake_case :
"""simple docstring"""
snake_case__ = "bar"
UpperCAmelCase__ = Foo()
self.assertEqual(foo.my_attr ,'bar' )
with temporary_assignment(lowerCamelCase__ ,'my_attr' ,'BAR' ):
self.assertEqual(foo.my_attr ,'BAR' )
self.assertEqual(foo.my_attr ,'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(1_6, 1_6, 1_6),
(1_6, 1_7, 1_6),
(1_7, 1_6, 1_6),
] , )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
UpperCAmelCase__ = {f'''{i}''': i for i in range(lowerCamelCase )}
UpperCAmelCase__ = map_nested(lambda lowerCamelCase : x + 1_0 , lowerCamelCase , num_proc=lowerCamelCase , parallel_min_length=1_6 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
@require_tf
def __lowerCAmelCase ( self : Any ):
import tensorflow as tf
from tensorflow.keras import layers
UpperCAmelCase__ = layers.Dense(2 )
def gen_random_output():
UpperCAmelCase__ = tf.random.uniform((1, 3) )
return model(lowerCamelCase__ ).numpy()
with temp_seed(42 ,set_tensorflow=lowerCamelCase__ ):
UpperCAmelCase__ = gen_random_output()
with temp_seed(42 ,set_tensorflow=lowerCamelCase__ ):
UpperCAmelCase__ = gen_random_output()
UpperCAmelCase__ = gen_random_output()
np.testing.assert_equal(lowerCamelCase__ ,lowerCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() ,0 )
@require_torch
    def test_temp_seed_pytorch(self):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()
        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)
        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
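# All three tests above rely on the same behavior: temp_seed seeds the
# requested framework(s) inside the `with` block and restores the previous RNG
# state on exit, so identical blocks reproduce identical outputs while code
# after the block stays random. A minimal NumPy-only sketch of such a context
# manager (not the `datasets` implementation):
import contextlib

@contextlib.contextmanager
def _temp_numpy_seed_sketch(seed):
    state = np.random.get_state()   # save the current RNG state
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)  # restore, so later draws are unaffected

with _temp_numpy_seed_sketch(42):
    _a = np.random.rand(3)
with _temp_numpy_seed_sketch(42):
    _b = np.random.rand(3)
assert (_a == _b).all()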
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1, y='foobar')
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input_) == expected_output
    input_ = {'a': {'b': A(x=10, y='foo')}, 'c': [A(x=20, y='bar')]}
    expected_output = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
    assert asdict(input_) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y='foo')])
def _split_text(text: str):
    return text.split()
def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'text': 'hello there'}] * 10))
        assert out.count('hello') == 10
        assert out.count('there') == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'text': 'hello there'}] * 10))
        assert out.count('hello') == 10
        assert out.count('there') == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{'content': 'a'}, {'content': 'b'}]):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count('a') == 2
        assert out.count('b') == 2
        assert len(out) == 4
| 98 | """simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    """simple docstring"""
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)
        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
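# A minimal usage sketch of the decorator above: with do_eager_mode=False the
# wrapped callable is compiled as a tf.function. The layer and inputs here are
# made up for illustration (they are not part of the benchmark API), and this
# assumes TensorFlow is installed:
_example_dense = tf.keras.layers.Dense(4)
_example_inputs = tf.ones((2, 8))

@run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
def _example_forward_pass():
    return _example_dense(_example_inputs)

print(_example_forward_pass().shape)  # (2, 4), executed through the tf.function wrapper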
def random_input_ids(batch_size, sequence_length, vocab_size):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name, batch_size, sequence_length):
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name, batch_size, sequence_length):
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(self, model_name, batch_size, sequence_length):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(self, model_name, batch_size, sequence_length):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name, batch_size, sequence_length):
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name, batch_size, sequence_length):
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func):
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 289 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    """simple docstring"""
    def __init__(self, cache_dir=None):
        '''simple docstring'''
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path) -> str:
        '''simple docstring'''
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path, force_extract) -> bool:
        '''simple docstring'''
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path, force_extract=False) -> str:
        '''simple docstring'''
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    """simple docstring"""
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        '''simple docstring'''
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """simple docstring"""
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path, magic_number_length):
        '''simple docstring'''
        with open(path, 'rb') as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        '''simple docstring'''
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """simple docstring"""
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        '''simple docstring'''
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        '''simple docstring'''
        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f'Extraction of {finfo.name} is blocked (illegal path)')
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}')
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}')
            else:
                yield finfo
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
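# `safemembers` above blocks path traversal: members whose resolved path (after
# ".." components, absolute paths, or sym/hard links) would land outside the
# extraction directory are skipped instead of extracted. A standalone
# illustration of the same check, with made-up paths:
def _escapes_base_dir(path, base):
    # resolve ".."/symlinks and verify the result still lives under `base`
    return not os.path.realpath(os.path.abspath(os.path.join(base, path))).startswith(os.path.realpath(base))

assert _escapes_base_dir("../../etc/passwd", "/tmp/extract_target") is True   # would be blocked
assert _escapes_base_dir("data/train.csv", "/tmp/extract_target") is False    # would be extracted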
class GzipExtractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'\x1F\x8B']
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        with gzip.open(input_path, 'rb') as gzip_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [
        b'PK\x03\x04',
        b'PK\x05\x06',  # empty archive
        b'PK\x07\x08',  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        '''simple docstring'''
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, 'rb') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, 'r') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        with lzma.open(input_path) as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00']  # RAR_ID, RAR5_ID
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile')
        import rarfile
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'\x28\xb5\x2F\xFD']
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard')
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'\x42\x5A\x68']
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        with bz2.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr')
        import py7zr
        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, 'r') as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    """simple docstring"""
    magic_numbers = [b'\x04\x22\x4D\x18']
    @staticmethod
    def extract(input_path, output_path) -> None:
        '''simple docstring'''
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4')
        import lz4.frame
        with lz4.frame.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """simple docstring"""
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        '''simple docstring'''
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
    @staticmethod
    def _read_magic_number(path, magic_number_length):
        '''simple docstring'''
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path, return_extractor=False) -> bool:
        '''simple docstring'''
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.", category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path) -> str:  # <Added version="2.4.0"/>
        '''simple docstring'''
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated", ) -> None:
        '''simple docstring'''
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('.lock'))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.", category=FutureWarning, )
                    extractor = extractor if extractor != 'deprecated' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.", category=FutureWarning, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
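# Putting the pieces together: extraction is driven purely by file content.
# `infer_extractor_format` sniffs the leading magic bytes (or tries tarfile
# detection) and `extract` dispatches to the matching extractor class. A usage
# sketch with hypothetical paths (kept as comments since the files don't exist):
#
#   archive_path = "/data/downloads/corpus.tar.gz"
#   output_dir = "/data/extracted/corpus"
#   fmt = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip" for b"\x1f\x8b"
#   if fmt:
#       Extractor.extract(archive_path, output_dir, extractor_format=fmt)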
| 99 | """simple docstring"""
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count
def solve(needed_sum, power):
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10.")
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
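# Example: there is exactly one way to write 10 as a sum of distinct natural
# numbers raised to the power 2 (10 = 1^2 + 3^2), so this prints 1:
print(solve(10, 2))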
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCamelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
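# The cases above fix the contract of _distribute_shards: shards are split into
# at most max_num_jobs contiguous ranges whose sizes differ by at most one. An
# illustrative re-implementation of that contract (not the library's code):
def _distribute_shards_sketch(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    shards_per_job, extra = divmod(num_shards, num_jobs)
    out, start = [], 0
    for job in range(num_jobs):
        # the first `extra` jobs take one extra shard each
        end = start + shards_per_job + (1 if job < extra else 0)
        out.append(range(start, end))
        start = end
    return out

assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert _distribute_shards_sketch(0, 1) == []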
| 100 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    """simple docstring"""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
    hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
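# The permutations above translate between weight layouts: TensorFlow stores
# standard conv kernels as HWIO (height, width, in, out) while PyTorch expects
# OIHW, and Keras depthwise kernels map onto PyTorch's grouped-conv layout.
# A quick shape check with an illustrative kernel:
_tf_kernel = np.zeros((3, 3, 64, 128), dtype=np.float32)       # H, W, in, out
_pt_kernel = torch.from_numpy(_tf_kernel).permute(3, 2, 0, 1)  # out, in, H, W
assert tuple(_pt_kernel.shape) == (128, 64, 3, 3)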
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 289 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""
    def __init__(self, *,  # begin keyword-only arguments
        bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices
    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        return len(self.symbols)
    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
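# Since add_from_file accepts any object with readlines(), the dictionary can
# be exercised without touching the filesystem (the vocabulary lines below are
# invented for illustration):
import io

_demo_dict = Dictionary.load(io.StringIO("hello 42\nworld 7\n"))
assert "hello" in _demo_dict and len(_demo_dict) == 6  # 4 special tokens + 2 loaded words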
def rewrite_dict_keys(d):
    '''simple docstring'''
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
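# A concrete check of the BPE rewrite described above: "@@" continuation
# suffixes are stripped and word-final tokens gain "</w>", while the four
# special tokens are restored unchanged (toy vocabulary for illustration):
_toy_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
_rewritten = rewrite_dict_keys(_toy_vocab)
assert _rewritten["le"] == 5 and _rewritten["er</w>"] == 7 and _rewritten["<s>"] == 0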
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location='cpu')
    args = chkpt['cfg']['model']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }
    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt['model']
    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict[layer_name.replace('decoder.', '')] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print('Conversion is done!')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 101 | """simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}
    def add_node(self, node: str):
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self):
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start, transitions, steps):
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
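# A short usage sketch of the random walk above; the two-state chain and step
# count are made up for illustration:
_example_transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
_example_counts = get_transitions("a", _example_transitions, 5000)
print(_example_counts.most_common())  # the walk should spend most steps in "a"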
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 102 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """center_crop"""))
        self.assertTrue(hasattr(image_processing, """do_flip_channel_order"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 20})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
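# Illustrative sketch (added for clarity, not part of the test file): do_flip_channel_order
# swaps the channel ordering, e.g. RGB -> BGR. For a channels-last numpy image this is just a
# reversed slice over the channel axis:
#
#   import numpy as np
#   image = np.arange(2 * 2 * 3).reshape(2, 2, 3)   # (H, W, C)
#   flipped = image[..., ::-1]                       # reverse the channel axis
#
# The exact implementation inside MobileViTImageProcessor may differ; this only shows the idea.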
| 289 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
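# Note on the three slow tests above: each one fixes the RNG seed, runs the pipeline, and
# compares a 3x3 corner slice of the output image (image[0, -3:, -3:, -1]) against stored
# reference values with a loose tolerance (max abs diff < 5e-2), which absorbs small numerical
# drift across hardware and library versions.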
| 103 | """simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
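# Illustrative usage sketch (added for clarity, not part of the original module; the values are
# the defaults defined above):
#
#   config = EfficientNetConfig()            # b7-style defaults, e.g. image_size=600
#   assert config.hidden_dim == 2560
#   assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # == 64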
| 289 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a list of random float values"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
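# Sketch of the normalization being tested (an assumption added for clarity: this mirrors the
# usual zero_mean_unit_var_norm core behavior, per-example standardization; the library code
# may handle attention masks and batching differently):
#
#   import numpy as np
#   def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
#       return (x - x.mean()) / np.sqrt(x.var() + eps)
#
# After this transform the mean is ~0 and the variance ~1, which is what the asserts check.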
| 104 | """simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Dict=36 , __lowerCAmelCase : Optional[Any]=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=6 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_hidden_groups
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Union[str, Any] ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any ):
_UpperCAmelCase = AlbertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = AlbertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = AlbertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = AlbertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = AlbertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_snake_case : Tuple = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : Dict = True
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = AlbertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 289 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    tokenizer_class = BloomTokenizerFast
    rust_tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def __a ( self ) -> Any:
a : Union[str, Any] = self.get_rust_tokenizer()
a : Dict = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
a : Optional[int] = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
a : Optional[Any] = tokenizer.batch_encode_plus(lowerCAmelCase__ )["input_ids"]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__=6 ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
a : Optional[Any] = "This is a simple input"
a : str = ["This is a simple input 1", "This is a simple input 2"]
a : int = ("This is a simple input", "This is a pair")
a : List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCAmelCase__ , max_length=lowerCAmelCase__ )
tokenizer_r.encode_plus(lowerCAmelCase__ , max_length=lowerCAmelCase__ )
tokenizer_r.batch_encode_plus(lowerCAmelCase__ , max_length=lowerCAmelCase__ )
tokenizer_r.encode(lowerCAmelCase__ , max_length=lowerCAmelCase__ )
tokenizer_r.batch_encode_plus(lowerCAmelCase__ , max_length=lowerCAmelCase__ )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" , )
# Pair input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" , )
def __a ( self ) -> Optional[int]:
a : Optional[int] = self.get_rust_tokenizer()
a : Tuple = load_dataset("xnli" , "all_languages" , split="test" , streaming=lowerCAmelCase__ )
a : List[Any] = next(iter(lowerCAmelCase__ ) )["premise"] # pick up one data
a : Union[str, Any] = list(sample_data.values() )
a : Tuple = list(map(tokenizer.encode , lowerCAmelCase__ ) )
a : Union[str, Any] = [tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) for x in output_tokens]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 105 | """simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from the source s in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    # This array is filled by bfs() to store the augmenting path.
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
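# How to read the result (explanatory note, not part of the original file): bfs() finds
# augmenting paths (Edmonds-Karp style), and after the loop the residual graph has no
# source-to-sink path left. mincut() then reports edges whose residual capacity dropped to 0
# but whose original capacity (saved in temp) was positive, i.e. the saturated edges.
# By the max-flow min-cut theorem, the maximum flow equals the minimum cut capacity; for this
# classic CLRS example graph both are 23.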
| 289 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 106 | """simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the two accumulators must be distinct variables, and the winner is
        # decided only after all components have been summed
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        # iterate over every component of the winning vector
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
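# Worked example of the distance computation in get_winner(), using the values from main():
# for the test sample [0, 0, 0, 1] and the *initial* weights,
#   d0 = 0.2**2 + 0.6**2 + 0.5**2 + (1 - 0.9)**2 = 0.66
#   d1 = 0.8**2 + 0.4**2 + 0.7**2 + (1 - 0.3)**2 = 1.78
# (the trained weights actually used by main() will differ; this only illustrates the formula).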
| 289 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version identifier, e.g. "1.2.3"."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to string."""
    return ".".join(str(v) for v in version_tuple)
| 107 | """simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=64 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCAmelCase_ ( self : Union[str, Any] ):
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = MPNetModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = MPNetForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MPNetForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MPNetForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MPNetForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : List[Any] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_snake_case : Union[str, Any] = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : int = False
_snake_case : List[Any] = True
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MPNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCAmelCase )
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 289 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """FIFO queue implemented on top of two LIFO stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce the number of attribute look-ups in the `while` loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
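# Usage sketch for QueueByTwoStacks (illustrative; amortized O(1) per operation, since each
# element is pushed and popped at most twice across the two stacks):
#
#   q = QueueByTwoStacks([10, 20, 30])
#   q.put(40)
#   assert q.get() == 10   # FIFO order despite the LIFO stacks underneath
#   assert len(q) == 3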
| 108 | """simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
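# Stand-alone sketch of the conversion rule used above (a simplified
# re-derivation; the full function also sanitises unit names and raises on
# unknown units). The result is value * 10 ** (from_exponent - to_exponent):
METRIC_EXPONENT = {"m": 0, "km": 3, "Mm": 6}  # subset of the table above

def convert_length(value: float, from_symbol: str, to_symbol: str) -> float:
    # Shift the value by the difference of the two powers of ten.
    return value * 10 ** (METRIC_EXPONENT[from_symbol] - METRIC_EXPONENT[to_symbol])

assert convert_length(4, "km", "m") == 4_000
assert abs(convert_length(250, "m", "km") - 0.25) < 1e-12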
| 289 | 0 |
"""simple docstring"""
import re
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Dict = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(UpperCamelCase , UpperCamelCase ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
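# A few hand-checked cases for the regex above (behaviour inferred from the
# pattern itself, so treat the expectations as illustrative):
import re

_INDIAN_PHONE = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
for number, expected in [
    ("+918827897895", True),   # optional +91 country code, then ten digits
    ("9876543210", True),      # bare ten-digit number starting with 7/8/9
    ("1234567890", False),     # first subscriber digit must be 7, 8 or 9
]:
    assert bool(_INDIAN_PHONE.search(number)) is expected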
| 109 | """simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1":
_UpperCAmelCase = 2
# Initialize accelerator
_UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase )
def inner_training_loop(lowercase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
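# The `find_executable_batch_size` decorator used above retries its wrapped
# function, halving `batch_size` every time an out-of-memory error escapes.
# A minimal sketch of that retry loop -- a simplification of the idea, not
# Accelerate's actual implementation (which detects CUDA/CUDNN OOM errors
# more carefully):
import functools

def find_workable_batch_size(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    # the wrapped function must take batch_size as its first argument
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2  # halve and retry with a smaller batch
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator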
| 289 | 0 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_SCREAMING_SNAKE_CASE : Any = HUGGINGFACE_HUB_CACHE
_SCREAMING_SNAKE_CASE : Optional[Any] = "config.json"
_SCREAMING_SNAKE_CASE : Optional[int] = "diffusion_pytorch_model.bin"
_SCREAMING_SNAKE_CASE : int = "diffusion_flax_model.msgpack"
_SCREAMING_SNAKE_CASE : Tuple = "model.onnx"
_SCREAMING_SNAKE_CASE : Dict = "diffusion_pytorch_model.safetensors"
_SCREAMING_SNAKE_CASE : List[Any] = "weights.pb"
_SCREAMING_SNAKE_CASE : Dict = "https://huggingface.co"
_SCREAMING_SNAKE_CASE : int = default_cache_path
_SCREAMING_SNAKE_CASE : List[Any] = "diffusers_modules"
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
_SCREAMING_SNAKE_CASE : int = ["fp16", "non-ema"]
_SCREAMING_SNAKE_CASE : Tuple = ".self_attn"
| 85 | """simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 289 | 0 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
SCREAMING_SNAKE_CASE__ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]=False ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = R""".*sequential.(\d+).*"""
lowerCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCAmelCase = key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if re.match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
lowerCAmelCase = re.match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).group(1 )
lowerCAmelCase = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(SCREAMING_SNAKE_CASE )//3}.linear.' )
elif re.match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = int(re.match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowerCAmelCase = 1 if projecton_layer == 0 else 2
lowerCAmelCase = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowerCAmelCase = value
lowerCAmelCase = mixed_qkv.size(0 ) // 3
lowerCAmelCase = mixed_qkv[:qkv_dim]
lowerCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
lowerCAmelCase = mixed_qkv[qkv_dim * 2 :]
lowerCAmelCase = query_layer
lowerCAmelCase = key_layer
lowerCAmelCase = value_layer
else:
lowerCAmelCase = value
return model_state_dict
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = init_clap(SCREAMING_SNAKE_CASE , enable_fusion=SCREAMING_SNAKE_CASE )
clap_model.eval()
lowerCAmelCase = clap_model.state_dict()
lowerCAmelCase = rename_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase = ClapConfig()
lowerCAmelCase = enable_fusion
lowerCAmelCase = ClapModel(SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
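# The qkv branch above splits one fused in-projection tensor into separate
# query/key/value tensors along dim 0. A self-contained illustration of that
# slicing with toy sizes (not tied to the actual CLAP checkpoint layout):
import torch

hidden = 4
mixed_qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
qkv_dim = mixed_qkv.size(0) // 3  # one third of the fused rows per tensor
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (hidden, hidden)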
| 46 | """simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = UNetaDModel
_snake_case : List[str] = 'sample'
@property
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = UNetaDModel
_snake_case : Optional[Any] = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self : Dict ):
return (4, 32, 32)
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : str ):
# by default model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) )
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[Any] = UNetaDModel
_snake_case : str = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Any ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : List[str] ):
# not required for this model
pass
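# The dummy configs above are deliberately tiny, so the same forward pass runs
# on CPU in well under a second. A sketch using the real class name
# (`UNet2DModel`; the identifier is obfuscated in this file), assuming a
# recent diffusers release with this constructor signature:
import torch
from diffusers import UNet2DModel

tiny_unet = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
    block_out_channels=(32, 64), attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
sample = torch.randn(1, 3, 32, 32)
out = tiny_unet(sample, timestep=torch.tensor([10])).sample
assert out.shape == sample.shape  # the UNet preserves the spatial shape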
| 289 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Union[str, Any] ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='''utf-8''' ,check=__lowerCAmelCase ,)
assert hasattr(self ,'''env''' )
def __lowerCamelCase ( self :str ,__lowercase :Optional[Any]=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-single""" ,instance_count=__lowerCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__lowerCAmelCase ,hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version='''py36''' ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Optional[Any] ):
TrainingJobAnalytics(__lowerCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def __lowerCamelCase ( self :str ):
# create estimator
snake_case__ : Dict = self.create_estimator()
# run training
estimator.fit()
# result dataframe
snake_case__ : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case__ : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
snake_case__ : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' ,9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} ,__lowerCAmelCase )
| 230 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = StableUnCLIPPipeline
_snake_case : str = TEXT_TO_IMAGE_PARAMS
_snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = 32
_UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 289 | 0 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
_UpperCAmelCase : Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
_UpperCAmelCase : Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Any = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
_UpperCAmelCase : List[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : int = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCAmelCase : Dict = """fp16"""
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
_UpperCAmelCase : List[str] = """fp16"""
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : str = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
_UpperCAmelCase : List[str] = """fp16"""
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : int = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
_UpperCAmelCase : Any = """fp16"""
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
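# The tests above all encode one rule: a repo is safetensors-compatible when
# every component folder shipping a PyTorch ``.bin`` file also ships a
# ``.safetensors`` file, with the variant infix (e.g. ``.fp16.``) taken into
# account. A simplified re-implementation of that rule, not the actual
# diffusers helper:
import os

def naive_safetensors_compatible(filenames, variant=None):
    bin_suffix = f".{variant}.bin" if variant else ".bin"
    sf_suffix = f".{variant}.safetensors" if variant else ".safetensors"
    dirs_with_bin = {os.path.dirname(f) for f in filenames if f.endswith(bin_suffix)}
    dirs_with_sf = {os.path.dirname(f) for f in filenames if f.endswith(sf_suffix)}
    # compatible iff no folder has a .bin without a .safetensors sibling
    return dirs_with_bin <= dirs_with_sf

assert naive_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"])
assert not naive_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])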
| 246 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 0 |
'''simple docstring'''
def _A ( lowercase__ , lowercase__ ):
def get_matched_characters(lowercase__ , lowercase__ ) -> str:
lowercase__ = []
lowercase__ = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase__ = int(max(0 , i - limit ) )
lowercase__ = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(lowercase__ )
lowercase__ = f'''{_stra[0:_stra.index(lowercase__ )]} {_stra[_stra.index(lowercase__ ) + 1:]}'''
return "".join(lowercase__ )
# matching characters
lowercase__ = get_matched_characters(lowercase__ , lowercase__ )
lowercase__ = get_matched_characters(lowercase__ , lowercase__ )
lowercase__ = len(lowercase__ )
# transposition
lowercase__ = (
len([(ca, ca) for ca, ca in zip(lowercase__ , lowercase__ ) if ca != ca] ) // 2
)
if not match_count:
lowercase__ = 0.0
else:
lowercase__ = (
1
/ 3
* (
match_count / len(lowercase__ )
+ match_count / len(lowercase__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase__ = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
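# Worked example for the implementation above (the classic textbook pair):
# "martha" vs "marhta" has 6 matched characters and 1 transposition, so
#   jaro = (6/6 + 6/6 + (6 - 1)/6) / 3 = 17/18 ~= 0.9444
# and with the shared prefix "mar" (length 3) the Winkler boost gives
#   jaro_winkler = 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611
jaro = (6 / 6 + 6 / 6 + (6 - 1) / 6) / 3
assert abs(jaro - 17 / 18) < 1e-12
assert abs(jaro + 0.1 * 3 * (1 - jaro) - 0.9611111111111111) < 1e-9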
| 164 | """simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 0 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ : Dict = {'UserAgent': UserAgent().random}
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = script.contents[0]
UpperCAmelCase__ = json.loads(data[data.find('{\"config\"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class snake_case :
"""simple docstring"""
def __init__( self : Dict ,lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = f'''https://www.instagram.com/{username}/'''
UpperCAmelCase__ = self.get_json()
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = requests.get(self.url ,headers=__lowerCAmelCase ).text
UpperCAmelCase__ = BeautifulSoup(__lowerCAmelCase ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Dict ):
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : Optional[Any] ):
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __lowerCAmelCase ( self : Any ):
return self.user_data["username"]
@property
def __lowerCAmelCase ( self : int ):
return self.user_data["full_name"]
@property
def __lowerCAmelCase ( self : str ):
return self.user_data["biography"]
@property
def __lowerCAmelCase ( self : str ):
return self.user_data["business_email"]
@property
def __lowerCAmelCase ( self : Dict ):
return self.user_data["external_url"]
@property
def __lowerCAmelCase ( self : List[str] ):
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCAmelCase ( self : List[Any] ):
return self.user_data["edge_follow"]["count"]
@property
def __lowerCAmelCase ( self : str ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self.user_data["is_verified"]
@property
def __lowerCAmelCase ( self : Dict ):
return self.user_data["is_private"]
def a_ ( lowerCamelCase = "github" ):
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
UpperCAmelCase__ = InstagramUser(lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ : Any = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 98 | """simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# 1) Construct the failure array for the pattern
_UpperCAmelCase = get_failure_array(lowercase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase = 0, 0 # index into text, pattern
while i < len(lowercase ):
if pattern[j] == text[i]:
if j == (len(lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase = failure[j - 1]
continue
i += 1
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0]
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while j < len(lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase = failure[i - 1]
continue
j += 1
failure.append(lowercase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
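# failure[k] is the length of the longest proper prefix of pattern[:k + 1]
# that is also a suffix of it -- for "aabaabaaa" (test 5 above):
#   pattern:  a a b a a b a a a
#   failure:  0 1 0 1 2 3 4 5 2
# The final 2 arises because the border "aabaa" cannot be extended by the
# last 'a' (pattern[5] is 'b'), so the index falls back through earlier
# failure values until the border "a" extends to "aa".
assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]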
| 289 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase :Union[str, Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ReformerTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = ReformerTokenizerFast
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : str = True
def _a (self ):
super().setUp()
A_ : Optional[int] = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
A_ : List[str] = """<s>"""
A_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _a (self ):
A_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__lowerCAmelCase ) , 1000 )
def _a (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a (self ):
if not self.test_rust_tokenizer:
return
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : int = """I was born in 92000, and this is falsé."""
A_ : str = tokenizer.tokenize(__lowerCAmelCase )
A_ : str = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
A_ : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
A_ : Any = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
A_ : List[Any] = self.get_rust_tokenizer()
A_ : Any = tokenizer.encode(__lowerCAmelCase )
A_ : Optional[int] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _a (self , lowercase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : Any = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
A_ : int = """This is a simple input"""
A_ : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
A_ : Optional[Any] = ("""This is a simple input""", """This is a pair""")
A_ : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
def _a (self ):
pass
def _a (self ):
A_ : Optional[Any] = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
A_ : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [285, 46, 10, 170, 382] , )
A_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A_ : Tuple = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A_ : Optional[int] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _a (self ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def _a (self ):
A_ : Tuple = """Hello World!"""
A_ : Tuple = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) )
@slow
def _a (self ):
A_ : Tuple = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
A_ : Tuple = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) )
@require_torch
@slow
def _a (self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A_ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
A_ : List[str] = """ """.join(__lowerCAmelCase )
A_ : Dict = self.big_tokenizer.encode_plus(__lowerCAmelCase , return_tensors="""pt""" )
A_ : str = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
A_ : Union[str, Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A_ : Dict = encoded_sequence["""input_ids"""].shape
A_ : Tuple = ReformerModel(__lowerCAmelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCAmelCase )
model(**__lowerCAmelCase )
@slow
def _a (self ):
# fmt: off
A_ : Union[str, Any] = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A_ : Optional[Any] = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=__lowerCAmelCase , sequences=__lowerCAmelCase , ) | 206 | """simple docstring"""
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 289 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = 'donut-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
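A minimal usage sketch of the config above; the derived `hidden_size` follows the formula in the last line of `__init__`:

from transformers import DonutSwinConfig

config = DonutSwinConfig()  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
# hidden_size = embed_dim * 2 ** (len(depths) - 1) = 96 * 2**3
print(config.hidden_size)  # 768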
| 66 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=99 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=20 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Any=0 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict( config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids ,config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape ,dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.int8 ),
            ] ,axis=-1 ,)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
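# Quick illustration of the mask construction above (toy ids; a pad_token_id
# of 0 is assumed for this sketch):
#   np.not_equal(np.array([[5, 6, 0, 0]]), 0).astype(np.int8)
#   -> array([[1, 1, 0, 0]], dtype=int8)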
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
def lowerCAmelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_class(__lowerCAmelCase )
@jax.jit
def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Dict ):
return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
return model.decode(
decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCAmelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__lowerCAmelCase , return_tensors="""np""" , truncation=__lowerCAmelCase , max_length=512 , padding=__lowerCAmelCase )
_UpperCAmelCase = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
assert tgt_text == decoded
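For reference, a condensed sketch of the summarization round trip the slow test above performs (same checkpoint and API; shortened input):

from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
inputs = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    return_tensors="np", truncation=True, max_length=512, padding=True,
)
summary_ids = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))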
| 289 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''num_attention_heads''' ) )
class LevitModelTester:
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=64 , UpperCamelCase_=3 , UpperCamelCase_=3 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=16 , UpperCamelCase_=[128, 256, 384] , UpperCamelCase_=[4, 6, 8] , UpperCamelCase_=[2, 3, 4] , UpperCamelCase_=[16, 16, 16] , UpperCamelCase_=0 , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=0.02 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=2 , ):
'''simple docstring'''
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Tuple = batch_size
UpperCamelCase__ :Optional[int] = image_size
UpperCamelCase__ :Union[str, Any] = num_channels
UpperCamelCase__ :Optional[Any] = kernel_size
UpperCamelCase__ :Union[str, Any] = stride
UpperCamelCase__ :List[str] = padding
UpperCamelCase__ :str = hidden_sizes
UpperCamelCase__ :Optional[int] = num_attention_heads
UpperCamelCase__ :Optional[int] = depths
UpperCamelCase__ :Tuple = key_dim
UpperCamelCase__ :List[Any] = drop_path_rate
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :List[Any] = attention_ratio
UpperCamelCase__ :Dict = mlp_ratio
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :Optional[int] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ :str = is_training
UpperCamelCase__ :Dict = use_labels
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = initializer_range
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Optional[int] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ :str = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = LevitModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ :str = model(__lowerCAmelCase )
UpperCamelCase__ :List[str] = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ :Dict = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ :Dict = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ :Union[str, Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :int = LevitForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ :Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = config_and_inputs
UpperCamelCase__ :Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LevitModel,
            'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Any = model_class(__lowerCAmelCase )
UpperCamelCase__ :Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Any = [*signature.parameters.keys()]
UpperCamelCase__ :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCamelCase__ :List[Any] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
UpperCamelCase__ :List[str] = outputs.hidden_states
UpperCamelCase__ :str = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
UpperCamelCase__ :Optional[int] = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ :Union[str, Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ :Optional[Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :str = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :Optional[int] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :int = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ :Union[str, Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase__ :Dict = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
UpperCamelCase__ :Tuple = model(**__lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ :Union[str, Any] = False
UpperCamelCase__ :Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ :Any = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase__ :Tuple = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
UpperCamelCase__ :Union[str, Any] = model(**__lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Any = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCamelCase__ :int = problem_type['''title''']
UpperCamelCase__ :str = problem_type['''num_labels''']
UpperCamelCase__ :Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase__ :Tuple = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase__ :int = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
UpperCamelCase__ :Optional[int] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
UpperCamelCase__ :Tuple = model(**__lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Any = LevitModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCAmelCase )
UpperCamelCase__ :Optional[Any] = self.default_image_processor
UpperCamelCase__ :int = prepare_img()
UpperCamelCase__ :List[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase__ :List[str] = model(**__lowerCAmelCase )
# verify the logits
UpperCamelCase__ :Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
UpperCamelCase__ :Any = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) ) | 97 | """simple docstring"""
import math
def sieve( n ):
    """Segmented sieve of Eratosthenes: return a list of all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    # Sieve the first segment [2, sqrt(n)] directly.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    # Sieve the remaining range segment by segment, crossing out
    # multiples of the base primes found above.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` at or below the start of the segment.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
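A quick, hand-checkable sanity test for the segmented sieve above:

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]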
| 289 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_dpt'''] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
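The `_LazyModule` registered above defers the heavy torch/vision imports until an attribute is first accessed; a usage sketch (relying on the conventional transformers top-level re-export):

from transformers import DPTConfig  # the submodule import happens lazily here

config = DPTConfig()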
| 322 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """simple docstring"""
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f'''Task {task} not supported.''' )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f'''Save tokenizer files to {pytorch_dump_path}''' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
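A hedged example invocation of the conversion script (the script name and all paths here are placeholders):

# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output_dir

Note that the checkpoint path must end in `model.ckpt`: the tokenizer step above derives the `vocab.txt` location by slicing the last 10 characters off `tf_checkpoint_path`.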
| 289 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 196 | """simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode , use_xla ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids( batch_size , sequence_length , vocab_size ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark( Benchmark ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
    def framework_version( self ):
return tf.__version__
    def _inference_speed( self , model_name , batch_size , sequence_length ):
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self , model_name , batch_size , sequence_length ):
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self , model_name , batch_size , sequence_length ):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self , model_name , batch_size , sequence_length ):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self , model_name , batch_size , sequence_length ):
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(input_ids , training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self , model_name , batch_size , sequence_length ):
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self , func ):
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
    def _measure_memory( self , func: Callable[[], None] ):
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    trace = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        memory = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
| 289 | 0 |